You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@flume.apache.org by rg...@apache.org on 2022/06/02 22:00:44 UTC

[flume-site] branch asf-staging updated: Update site for 1.10.0

This is an automated email from the ASF dual-hosted git repository.

rgoers pushed a commit to branch asf-staging
in repository https://gitbox.apache.org/repos/asf/flume-site.git


The following commit(s) were added to refs/heads/asf-staging by this push:
     new 8e7c137  Update site for 1.10.0
8e7c137 is described below

commit 8e7c137289dadd907a4e0bef6da6b388233a7d4f
Author: Ralph Goers <rg...@apache.org>
AuthorDate: Thu Jun 2 15:00:37 2022 -0700

    Update site for 1.10.0
---
 content/.buildinfo                            |   2 +-
 content/.doctrees/FlumeDeveloperGuide.doctree | Bin 210722 -> 218758 bytes
 content/.doctrees/FlumeUserGuide.doctree      | Bin 2995341 -> 2954795 bytes
 content/.doctrees/download.doctree            | Bin 30265 -> 30357 bytes
 content/.doctrees/environment.pickle          | Bin 189396 -> 194522 bytes
 content/.doctrees/index.doctree               | Bin 199158 -> 206422 bytes
 content/.doctrees/releases/1.10.0.doctree     | Bin 0 -> 69677 bytes
 content/.doctrees/releases/index.doctree      | Bin 5839 -> 5883 bytes
 content/.doctrees/team.doctree                | Bin 94792 -> 107131 bytes
 content/.htaccess                             |   3 +
 content/FlumeDeveloperGuide.html              | 100 +++++-
 content/FlumeUserGuide.html                   | 499 ++++++++++++--------------
 content/_sources/FlumeDeveloperGuide.txt      |  92 ++++-
 content/_sources/FlumeUserGuide.txt           | 296 ++++++++-------
 content/_sources/download.txt                 |   8 +-
 content/_sources/index.txt                    |  28 ++
 content/_sources/releases/1.10.0.txt          |  69 ++++
 content/_sources/releases/index.txt           |   5 +-
 content/_sources/team.txt                     |  70 ++--
 content/documentation.html                    |   4 +-
 content/download.html                         |  18 +-
 content/getinvolved.html                      |   2 +-
 content/index.html                            |  18 +-
 content/license.html                          |   2 +-
 content/mailinglists.html                     |   2 +-
 content/releases/1.0.0.html                   |   2 +-
 content/releases/1.1.0.html                   |   2 +-
 content/releases/{1.8.0.html => 1.10.0.html}  | 125 +++----
 content/releases/1.2.0.html                   |   2 +-
 content/releases/1.3.0.html                   |   2 +-
 content/releases/1.3.1.html                   |   2 +-
 content/releases/1.4.0.html                   |   2 +-
 content/releases/1.5.0.1.html                 |   2 +-
 content/releases/1.5.0.html                   |   2 +-
 content/releases/1.5.2.html                   |   2 +-
 content/releases/1.6.0.html                   |   2 +-
 content/releases/1.7.0.html                   |   2 +-
 content/releases/1.8.0.html                   |   2 +-
 content/releases/1.9.0.html                   |   4 +-
 content/releases/index.html                   |   9 +-
 content/search.html                           |   2 +-
 content/searchindex.js                        |   2 +-
 content/source.html                           |   2 +-
 content/team.html                             | 221 +++++++-----
 content/testing.html                          |   2 +-
 source/sphinx/FlumeDeveloperGuide.rst         |  92 ++++-
 source/sphinx/FlumeUserGuide.rst              | 296 ++++++++-------
 source/sphinx/download.rst                    |   8 +-
 source/sphinx/index.rst                       |  28 ++
 source/sphinx/releases/1.10.0.rst             |  69 ++++
 source/sphinx/releases/index.rst              |   5 +-
 source/sphinx/team.rst                        |  70 ++--
 52 files changed, 1373 insertions(+), 804 deletions(-)

diff --git a/content/.buildinfo b/content/.buildinfo
index a84f43f..b33361e 100644
--- a/content/.buildinfo
+++ b/content/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 06145c70e0402171cc997b657d43451c
+config: 8746fea04d37df13408ef9431b76a021
 tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/content/.doctrees/FlumeDeveloperGuide.doctree b/content/.doctrees/FlumeDeveloperGuide.doctree
index f41429f..6cf9d9c 100644
Binary files a/content/.doctrees/FlumeDeveloperGuide.doctree and b/content/.doctrees/FlumeDeveloperGuide.doctree differ
diff --git a/content/.doctrees/FlumeUserGuide.doctree b/content/.doctrees/FlumeUserGuide.doctree
index 18ff00d..e46721d 100644
Binary files a/content/.doctrees/FlumeUserGuide.doctree and b/content/.doctrees/FlumeUserGuide.doctree differ
diff --git a/content/.doctrees/download.doctree b/content/.doctrees/download.doctree
index cae49cb..a5a6761 100644
Binary files a/content/.doctrees/download.doctree and b/content/.doctrees/download.doctree differ
diff --git a/content/.doctrees/environment.pickle b/content/.doctrees/environment.pickle
index a1bae9d..225585f 100644
Binary files a/content/.doctrees/environment.pickle and b/content/.doctrees/environment.pickle differ
diff --git a/content/.doctrees/index.doctree b/content/.doctrees/index.doctree
index 2d3c33d..9775ac2 100644
Binary files a/content/.doctrees/index.doctree and b/content/.doctrees/index.doctree differ
diff --git a/content/.doctrees/releases/1.10.0.doctree b/content/.doctrees/releases/1.10.0.doctree
new file mode 100644
index 0000000..0c14b12
Binary files /dev/null and b/content/.doctrees/releases/1.10.0.doctree differ
diff --git a/content/.doctrees/releases/index.doctree b/content/.doctrees/releases/index.doctree
index 82403d4..cd54299 100644
Binary files a/content/.doctrees/releases/index.doctree and b/content/.doctrees/releases/index.doctree differ
diff --git a/content/.doctrees/team.doctree b/content/.doctrees/team.doctree
index 2013116..212f5fa 100644
Binary files a/content/.doctrees/team.doctree and b/content/.doctrees/team.doctree differ
diff --git a/content/.htaccess b/content/.htaccess
new file mode 100644
index 0000000..e9708fa
--- /dev/null
+++ b/content/.htaccess
@@ -0,0 +1,3 @@
+ErrorDocument 404 /errors/404.html
+
+
diff --git a/content/FlumeDeveloperGuide.html b/content/FlumeDeveloperGuide.html
index e7d7ffc..4c81338 100644
--- a/content/FlumeDeveloperGuide.html
+++ b/content/FlumeDeveloperGuide.html
@@ -7,7 +7,7 @@
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     
-    <title>Flume 1.9.0 Developer Guide &mdash; Apache Flume</title>
+    <title>Flume 1.10.0 Developer Guide &mdash; Apache Flume</title>
     
     <link rel="stylesheet" href="_static/flume.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -27,7 +27,7 @@
     <link rel="top" title="Apache Flume" href="index.html" />
     <link rel="up" title="Documentation" href="documentation.html" />
     <link rel="next" title="Releases" href="releases/index.html" />
-    <link rel="prev" title="Flume 1.9.0 User Guide" href="FlumeUserGuide.html" /> 
+    <link rel="prev" title="Flume 1.10.0 User Guide" href="FlumeUserGuide.html" /> 
   </head>
   <body>
 <div class="header">
@@ -60,8 +60,8 @@
         <div class="bodywrapper">
           <div class="body">
             
-  <div class="section" id="flume-1-9-0-developer-guide">
-<h1>Flume 1.9.0 Developer Guide<a class="headerlink" href="#flume-1-9-0-developer-guide" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="flume-1-10-0-developer-guide">
+<h1>Flume 1.10.0 Developer Guide<a class="headerlink" href="#flume-1-10-0-developer-guide" title="Permalink to this headline">¶</a></h1>
 <div class="section" id="introduction">
 <h2>Introduction<a class="headerlink" href="#introduction" title="Permalink to this headline">¶</a></h2>
 <div class="section" id="overview">
@@ -893,6 +893,93 @@ mechanism that captures the new data and stores it into the <tt class="docutils
 <h4>Channel<a class="headerlink" href="#channel" title="Permalink to this headline">¶</a></h4>
 <p>TBD</p>
 </div>
+<div class="section" id="initializable">
+<h4>Initializable<a class="headerlink" href="#initializable" title="Permalink to this headline">¶</a></h4>
+<p>As of Flume 1.10.0 Sources, Sinks, and Channels may implement the Intitializable interface. Doing so
+allows the component to have access the materialized configuration before any of the components have been
+started.</p>
+<p>This example shows a Sink being configured with the name of a Source. While initializing it will
+retrieve the Source from the configuration and save it. During event processing a new event will be
+sent to the Source, presumably after the event has be modified in some way.</p>
+<div class="highlight-java"><div class="highlight"><pre><span class="kd">public</span> <span class="kd">class</span> <span class="nc">NullInitSink</span> <span class="kd">extends</span> <span class="n">NullSink</span> <span class="kd">implements</span> <span class="n">Initializable</span> <span class="o">{</span>
+
+  <span class="kd">private</span> <span class="kd">static</span> <span class="kd">final</span> <span class="n">Logger</span> <span class="n">logger</span> <span class="o">=</span> <span class="n">LoggerFactory</span><span class="o">.</span><span class="na">getLogger</span><span class="o">(</span><span class="n">NullInitSink</span><span class="o">.</span><span class="na">class</span><span class="o">);</span>
+  <span class="kd">private</span> <span class="n">String</span> <span class="n">sourceName</span> <span class="o">=</span> <span class="kc">null</span><span class="o">;</span>
+  <span class="kd">private</span> <span class="n">EventProcessor</span> <span class="n">eventProcessor</span> <span class="o">=</span> <span class="kc">null</span><span class="o">;</span>
+  <span class="kd">private</span> <span class="kt">long</span> <span class="n">total</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span>
+
+  <span class="kd">public</span> <span class="nf">NullInitSink</span><span class="o">()</span> <span class="o">{</span>
+    <span class="kd">super</span><span class="o">();</span>
+  <span class="o">}</span>
+
+  <span class="nd">@Override</span>
+  <span class="kd">public</span> <span class="kt">void</span> <span class="nf">configure</span><span class="o">(</span><span class="n">Context</span> <span class="n">context</span><span class="o">)</span> <span class="o">{</span>
+    <span class="n">sourceName</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="na">getString</span><span class="o">(</span><span class="s">&quot;targetSource&quot;</span><span class="o">);</span>
+    <span class="kd">super</span><span class="o">.</span><span class="na">configure</span><span class="o">(</span><span class="n">context</span><span class="o">);</span>
+
+  <span class="o">}</span>
+
+  <span class="nd">@Override</span>
+  <span class="kd">public</span> <span class="kt">void</span> <span class="nf">initialize</span><span class="o">(</span><span class="n">MaterializedConfiguration</span> <span class="n">configuration</span><span class="o">)</span> <span class="o">{</span>
+    <span class="n">logger</span><span class="o">.</span><span class="na">debug</span><span class="o">(</span><span class="s">&quot;Locating source for event publishing&quot;</span><span class="o">);</span>
+    <span class="k">for</span> <span class="o">(</span><span class="n">Map</span><span class="o">.</span><span class="na">Entry</span><span class="o">&lt;</span><span class="n">String</span><span class="o">,</span> <span class="n">SourceRunner</span><span class="o">&gt;</span>  <span class="n">entry</span> <span class="o">:</span> <span class="n">configuration</span><span class="o">.</span><span class="na">getSourceRunners</span><span class="o">().</span><span class="na">entrySet</span>< [...]
+      <span class="k">if</span> <span class="o">(</span><span class="n">entry</span><span class="o">.</span><span class="na">getKey</span><span class="o">().</span><span class="na">equals</span><span class="o">(</span><span class="n">sourceName</span><span class="o">))</span> <span class="o">{</span>
+        <span class="n">Source</span> <span class="n">source</span> <span class="o">=</span> <span class="n">entry</span><span class="o">.</span><span class="na">getValue</span><span class="o">().</span><span class="na">getSource</span><span class="o">();</span>
+        <span class="k">if</span> <span class="o">(</span><span class="n">source</span> <span class="k">instanceof</span> <span class="n">EventProcessor</span><span class="o">)</span> <span class="o">{</span>
+          <span class="n">eventProcessor</span> <span class="o">=</span> <span class="o">(</span><span class="n">EventProcessor</span><span class="o">)</span> <span class="n">source</span><span class="o">;</span>
+          <span class="n">logger</span><span class="o">.</span><span class="na">debug</span><span class="o">(</span><span class="s">&quot;Found event processor {}&quot;</span><span class="o">,</span> <span class="n">source</span><span class="o">.</span><span class="na">getName</span><span class="o">());</span>
+          <span class="k">return</span><span class="o">;</span>
+        <span class="o">}</span>
+      <span class="o">}</span>
+    <span class="o">}</span>
+    <span class="n">logger</span><span class="o">.</span><span class="na">warn</span><span class="o">(</span><span class="s">&quot;No Source named {} found for republishing events.&quot;</span><span class="o">,</span> <span class="n">sourceName</span><span class="o">);</span>
+  <span class="o">}</span>
+
+  <span class="nd">@Override</span>
+  <span class="kd">public</span> <span class="n">Status</span> <span class="nf">process</span><span class="o">()</span> <span class="kd">throws</span> <span class="n">EventDeliveryException</span> <span class="o">{</span>
+    <span class="n">Status</span> <span class="n">status</span> <span class="o">=</span> <span class="n">Status</span><span class="o">.</span><span class="na">READY</span><span class="o">;</span>
+
+    <span class="n">Channel</span> <span class="n">channel</span> <span class="o">=</span> <span class="n">getChannel</span><span class="o">();</span>
+    <span class="n">Transaction</span> <span class="n">transaction</span> <span class="o">=</span> <span class="n">channel</span><span class="o">.</span><span class="na">getTransaction</span><span class="o">();</span>
+    <span class="n">Event</span> <span class="n">event</span> <span class="o">=</span> <span class="kc">null</span><span class="o">;</span>
+    <span class="n">CounterGroup</span> <span class="n">counterGroup</span> <span class="o">=</span> <span class="n">getCounterGroup</span><span class="o">();</span>
+    <span class="kt">long</span> <span class="n">batchSize</span> <span class="o">=</span> <span class="n">getBatchSize</span><span class="o">();</span>
+    <span class="kt">long</span> <span class="n">eventCounter</span> <span class="o">=</span> <span class="n">counterGroup</span><span class="o">.</span><span class="na">get</span><span class="o">(</span><span class="s">&quot;events.success&quot;</span><span class="o">);</span>
+
+    <span class="k">try</span> <span class="o">{</span>
+      <span class="n">transaction</span><span class="o">.</span><span class="na">begin</span><span class="o">();</span>
+      <span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span>
+      <span class="k">for</span> <span class="o">(</span><span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">batchSize</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
+        <span class="n">event</span> <span class="o">=</span> <span class="n">channel</span><span class="o">.</span><span class="na">take</span><span class="o">();</span>
+        <span class="k">if</span> <span class="o">(</span><span class="n">event</span> <span class="o">!=</span> <span class="kc">null</span><span class="o">)</span> <span class="o">{</span>
+          <span class="kt">long</span> <span class="n">id</span> <span class="o">=</span> <span class="n">Long</span><span class="o">.</span><span class="na">parseLong</span><span class="o">(</span><span class="k">new</span> <span class="n">String</span><span class="o">(</span><span class="n">event</span><span class="o">.</span><span class="na">getBody</span><span class="o">()));</span>
+          <span class="n">total</span> <span class="o">+=</span> <span class="n">id</span><span class="o">;</span>
+          <span class="n">event</span><span class="o">.</span><span class="na">getHeaders</span><span class="o">().</span><span class="na">put</span><span class="o">(</span><span class="s">&quot;Total&quot;</span><span class="o">,</span> <span class="n">Long</span><span class="o">.</span><span class="na">toString</span><span class="o">(</span><span class="n">total</span><span class="o">));</span>
+          <span class="n">eventProcessor</span><span class="o">.</span><span class="na">processEvent</span><span class="o">(</span><span class="n">event</span><span class="o">);</span>
+          <span class="n">logger</span><span class="o">.</span><span class="na">info</span><span class="o">(</span><span class="s">&quot;Null sink {} successful processed event {}&quot;</span><span class="o">,</span> <span class="n">getName</span><span class="o">(),</span> <span class="n">id</span><span class="o">);</span>
+        <span class="o">}</span> <span class="k">else</span> <span class="o">{</span>
+          <span class="n">status</span> <span class="o">=</span> <span class="n">Status</span><span class="o">.</span><span class="na">BACKOFF</span><span class="o">;</span>
+          <span class="k">break</span><span class="o">;</span>
+        <span class="o">}</span>
+      <span class="o">}</span>
+      <span class="n">transaction</span><span class="o">.</span><span class="na">commit</span><span class="o">();</span>
+      <span class="n">counterGroup</span><span class="o">.</span><span class="na">addAndGet</span><span class="o">(</span><span class="s">&quot;events.success&quot;</span><span class="o">,</span> <span class="o">(</span><span class="kt">long</span><span class="o">)</span> <span class="n">Math</span><span class="o">.</span><span class="na">min</span><span class="o">(</span><span class="n">batchSize</span><span class="o">,</span> <span class="n">i</span><span class="o">));</span>
+      <span class="n">counterGroup</span><span class="o">.</span><span class="na">incrementAndGet</span><span class="o">(</span><span class="s">&quot;transaction.success&quot;</span><span class="o">);</span>
+    <span class="o">}</span> <span class="k">catch</span> <span class="o">(</span><span class="n">Exception</span> <span class="n">ex</span><span class="o">)</span> <span class="o">{</span>
+      <span class="n">transaction</span><span class="o">.</span><span class="na">rollback</span><span class="o">();</span>
+      <span class="n">counterGroup</span><span class="o">.</span><span class="na">incrementAndGet</span><span class="o">(</span><span class="s">&quot;transaction.failed&quot;</span><span class="o">);</span>
+      <span class="n">logger</span><span class="o">.</span><span class="na">error</span><span class="o">(</span><span class="s">&quot;Failed to deliver event. Exception follows.&quot;</span><span class="o">,</span> <span class="n">ex</span><span class="o">);</span>
+      <span class="k">throw</span> <span class="k">new</span> <span class="nf">EventDeliveryException</span><span class="o">(</span><span class="s">&quot;Failed to deliver event: &quot;</span> <span class="o">+</span> <span class="n">event</span><span class="o">,</span> <span class="n">ex</span><span class="o">);</span>
+    <span class="o">}</span> <span class="k">finally</span> <span class="o">{</span>
+      <span class="n">transaction</span><span class="o">.</span><span class="na">close</span><span class="o">();</span>
+    <span class="o">}</span>
+
+    <span class="k">return</span> <span class="n">status</span><span class="o">;</span>
+  <span class="o">}</span>
+<span class="o">}</span>
+</pre></div>
+</div>
+</div>
 </div>
 </div>
 </div>
@@ -937,7 +1024,7 @@ mechanism that captures the new data and stores it into the <tt class="docutils
 
 <h3><a href="index.html">This Page</a></h3>
 <ul>
-<li><a class="reference internal" href="#">Flume 1.9.0 Developer Guide</a><ul>
+<li><a class="reference internal" href="#">Flume 1.10.0 Developer Guide</a><ul>
 <li><a class="reference internal" href="#introduction">Introduction</a><ul>
 <li><a class="reference internal" href="#overview">Overview</a></li>
 <li><a class="reference internal" href="#architecture">Architecture</a><ul>
@@ -966,6 +1053,7 @@ mechanism that captures the new data and stores it into the <tt class="docutils
 <li><a class="reference internal" href="#sink">Sink</a></li>
 <li><a class="reference internal" href="#source">Source</a></li>
 <li><a class="reference internal" href="#channel">Channel</a></li>
+<li><a class="reference internal" href="#initializable">Initializable</a></li>
 </ul>
 </li>
 </ul>
@@ -979,7 +1067,7 @@ mechanism that captures the new data and stores it into the <tt class="docutils
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/FlumeUserGuide.html b/content/FlumeUserGuide.html
index a32a636..c57234d 100644
--- a/content/FlumeUserGuide.html
+++ b/content/FlumeUserGuide.html
@@ -7,7 +7,7 @@
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     
-    <title>Flume 1.9.0 User Guide &mdash; Apache Flume</title>
+    <title>Flume 1.10.0 User Guide &mdash; Apache Flume</title>
     
     <link rel="stylesheet" href="_static/flume.css" type="text/css" />
     <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
@@ -26,7 +26,7 @@
     <script type="text/javascript" src="_static/doctools.js"></script>
     <link rel="top" title="Apache Flume" href="index.html" />
     <link rel="up" title="Documentation" href="documentation.html" />
-    <link rel="next" title="Flume 1.9.0 Developer Guide" href="FlumeDeveloperGuide.html" />
+    <link rel="next" title="Flume 1.10.0 Developer Guide" href="FlumeDeveloperGuide.html" />
     <link rel="prev" title="Documentation" href="documentation.html" /> 
   </head>
   <body>
@@ -60,8 +60,8 @@
         <div class="bodywrapper">
           <div class="body">
             
-  <div class="section" id="flume-1-9-0-user-guide">
-<h1>Flume 1.9.0 User Guide<a class="headerlink" href="#flume-1-9-0-user-guide" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="flume-1-10-0-user-guide">
+<h1>Flume 1.10.0 User Guide<a class="headerlink" href="#flume-1-10-0-user-guide" title="Permalink to this headline">¶</a></h1>
 <div class="section" id="introduction">
 <h2>Introduction<a class="headerlink" href="#introduction" title="Permalink to this headline">¶</a></h2>
 <div class="section" id="overview">
@@ -141,6 +141,8 @@ Flume supports a durable file channel which is backed by the local file system.
 There&#8217;s also a memory channel which simply stores the events in an in-memory
 queue, which is faster but any events still left in the memory channel when an
 agent process dies can&#8217;t be recovered.</p>
+<p>Flume&#8217;s <cite>KafkaChannel</cite> uses Apache Kafka to stage events. Using a replicated
+Kafka topic as a channel helps avoiding event loss in case of a disk failure.</p>
 </div>
 </div>
 </div>
@@ -148,12 +150,11 @@ agent process dies can&#8217;t be recovered.</p>
 <h2>Setup<a class="headerlink" href="#setup" title="Permalink to this headline">¶</a></h2>
 <div class="section" id="setting-up-an-agent">
 <h3>Setting up an agent<a class="headerlink" href="#setting-up-an-agent" title="Permalink to this headline">¶</a></h3>
-<p>Flume agent configuration is stored in a local configuration file.  This is a
-text file that follows the Java properties file format.
-Configurations for one or more agents can be specified in the same
-configuration file. The configuration file includes properties of each source,
-sink and channel in an agent and how they are wired together to form data
-flows.</p>
+<p>Flume agent configuration is stored in one or more configuration files that
+follow the Java properties file format. Configurations for one or more agents
+can be specified in these configuration files. The configuration includes
+properties of each source, sink and channel in an agent and how they are wired
+together to form data flows.</p>
 <div class="section" id="configuring-individual-components">
 <h4>Configuring individual components<a class="headerlink" href="#configuring-individual-components" title="Permalink to this headline">¶</a></h4>
 <p>Each component (source, sink or channel) in the flow has a name, type, and set
@@ -242,27 +243,105 @@ OK</pre>
 </div>
 <p>Congratulations - you&#8217;ve successfully configured and deployed a Flume agent! Subsequent sections cover agent configuration in much more detail.</p>
 </div>
-<div class="section" id="using-environment-variables-in-configuration-files">
-<h4>Using environment variables in configuration files<a class="headerlink" href="#using-environment-variables-in-configuration-files" title="Permalink to this headline">¶</a></h4>
+<div class="section" id="configuration-from-uris">
+<h4>Configuration from URIs<a class="headerlink" href="#configuration-from-uris" title="Permalink to this headline">¶</a></h4>
+<p>As of version 1.10.0 Flume supports being configured using URIs instead of just from local files. Direct support
+for HTTP(S), file, and classpath URIs is included. The HTTP support includes support for authentication using
+basic authorization but other authorization mechanisms may be supported by specifying the fully qualified name
+of the class that implements the AuthorizationProvider interface using the &#8211;auth-provider option. HTTP also
+supports reloading of configuration files using polling if the target server properly responds to the If-Modified-Since
+header.</p>
+<p>To specify credentials for HTTP authentication add:</p>
+<div class="highlight-none"><div class="highlight"><pre>--conf-user userid --conf-password password
+</pre></div>
+</div>
+<p>to the startup command.</p>
+</div>
+<div class="section" id="multiple-configuration-files">
+<h4>Multiple Configuration Files<a class="headerlink" href="#multiple-configuration-files" title="Permalink to this headline">¶</a></h4>
+<p>As of version 1.10.0 Flume supports being configured from multiple configuration files instead of just one.
+This more easily allows values to be overridden or added based on specific environments. Each file should
+be configured using its own &#8211;conf-file or &#8211;conf-uri option. However, all files should either be provided
+with &#8211;conf-file or with &#8211;conf-uri. If &#8211;conf-file and &#8211;conf-uri appear together as options all &#8211;conf-uri
+configurations will be processed before any of the &#8211;conf-file configurations are merged.</p>
+<p>For example, a configuration of:</p>
+<div class="highlight-none"><div class="highlight"><pre>$ bin/flume-ng agent --conf conf --conf-file example.conf --conf-uri http://localhost:80/flume.conf --conf-uri http://localhost:80/override.conf --name a1 -Dflume.root.logger=INFO,console
+</pre></div>
+</div>
+<p>will cause flume.conf to be read first, override.conf to be merged with it and finally example.conf would be
+merged last. If it is desirec to have example.conf be the base configuration it should be specified using the
+&#8211;conf-uri option either as:</p>
+<div class="highlight-none"><div class="highlight"><pre>--conf-uri classpath://example.conf
+or
+--conf-uri file:///example.conf
+</pre></div>
+</div>
+<p>depending on how it should be accessed.</p>
+</div>
+<div class="section" id="using-environment-variables-system-properies-or-other-properties-configuration-files">
+<h4>Using environment variables, system properies, or other properties configuration files<a class="headerlink" href="#using-environment-variables-system-properies-or-other-properties-configuration-files" title="Permalink to this headline">¶</a></h4>
 <p>Flume has the ability to substitute environment variables in the configuration. For example:</p>
 <div class="highlight-none"><div class="highlight"><pre>a1.sources = r1
 a1.sources.r1.type = netcat
 a1.sources.r1.bind = 0.0.0.0
-a1.sources.r1.port = ${NC_PORT}
+a1.sources.r1.port = ${env:NC_PORT}
 a1.sources.r1.channels = c1
 </pre></div>
 </div>
 <p>NB: it currently works for values only, not for keys. (Ie. only on the &#8220;right side&#8221; of the <cite>=</cite> mark of the config lines.)</p>
-<p>This can be enabled via Java system properties on agent invocation by setting <cite>propertiesImplementation = org.apache.flume.node.EnvVarResolverProperties</cite>.</p>
+<p>As of version 1.10.0 Flume resolves configuration values using Apache Commons Text&#8217;s StringSubstitutor
+class using the default set of Lookups along with a lookup that uses the configuration files as a
+source for replacement values.</p>
 <dl class="docutils">
 <dt>For example::</dt>
-<dd>$ NC_PORT=44444 bin/flume-ng agent &#8211;conf conf &#8211;conf-file example.conf &#8211;name a1 -Dflume.root.logger=INFO,console -DpropertiesImplementation=org.apache.flume.node.EnvVarResolverProperties</dd>
+<dd>$ NC_PORT=44444 bin/flume-ng agent &#8211;conf conf &#8211;conf-file example.conf &#8211;name a1 -Dflume.root.logger=INFO,console</dd>
 </dl>
 <p>Note the above is just an example, environment variables can be configured in other ways, including being set in <cite>conf/flume-env.sh</cite>.</p>
+<p>As noted, system properties are also supported, so the configuration:</p>
+<div class="highlight-none"><div class="highlight"><pre>a1.sources = r1
+a1.sources.r1.type = netcat
+a1.sources.r1.bind = 0.0.0.0
+a1.sources.r1.port = ${sys:NC_PORT}
+a1.sources.r1.channels = c1
+</pre></div>
+</div>
+<p>could be used and the startup command could be:</p>
+<div class="highlight-none"><div class="highlight"><pre>$ bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console -DNC_PORT=44444
+</pre></div>
+</div>
+<p>Furthermore, because multiple configuration files are allowed the first file could contain:</p>
+<div class="highlight-none"><div class="highlight"><pre>a1.sources = r1
+a1.sources.r1.type = netcat
+a1.sources.r1.bind = 0.0.0.0
+a1.sources.r1.port = ${NC_PORT}
+a1.sources.r1.channels = c1
+</pre></div>
+</div>
+<p>and the override file could contain:</p>
+<div class="highlight-none"><div class="highlight"><pre>NC_PORT = 44444
+</pre></div>
+</div>
+<p>In this case the startup command could be:</p>
+<div class="highlight-none"><div class="highlight"><pre>$ bin/flume-ng agent --conf conf --conf-file example.conf --conf-file override.conf --name a1 -Dflume.root.logger=INFO,console
+</pre></div>
+</div>
+<p>Note that the method for specifying environment variables as was done in prior versions will stil work
+but has been deprecated in favor of using ${env:varName}.</p>
+</div>
+<div class="section" id="using-a-command-options-file">
+<h4>Using a command options file<a class="headerlink" href="#using-a-command-options-file" title="Permalink to this headline">¶</a></h4>
+<p>Instead of specifying all the command options on the command line as of version 1.10.0 command
+options may be placed in either /etc/flume/flume.opts or flume.opts on the classpath. An example
+might be:</p>
+<div class="highlight-none"><div class="highlight"><pre>conf-file = example.conf
+conf-file = override.conf
+name = a1
+</pre></div>
+</div>
 </div>
 <div class="section" id="logging-raw-data">
 <h4>Logging raw data<a class="headerlink" href="#logging-raw-data" title="Permalink to this headline">¶</a></h4>
-<p>Logging the raw stream of data flowing through the ingest pipeline is not desired behaviour in
+<p>Logging the raw stream of data flowing through the ingest pipeline is not desired behavior in
 many production environments because this may result in leaking sensitive data or security related
 configurations, such as secret keys, to Flume log files.
 By default, Flume will not log such information. On the other hand, if the data pipeline is broken,
@@ -486,10 +565,10 @@ component:</p>
 <span class="na">&lt;Agent&gt;.sources.&lt;Source&gt;.&lt;someProperty&gt;</span> <span class="o">=</span> <span class="s">&lt;someValue&gt;</span>
 
 <span class="c"># properties for channels</span>
-<span class="na">&lt;Agent&gt;.channel.&lt;Channel&gt;.&lt;someProperty&gt;</span> <span class="o">=</span> <span class="s">&lt;someValue&gt;</span>
+<span class="na">&lt;Agent&gt;.channels.&lt;Channel&gt;.&lt;someProperty&gt;</span> <span class="o">=</span> <span class="s">&lt;someValue&gt;</span>
 
 <span class="c"># properties for sinks</span>
-<span class="na">&lt;Agent&gt;.sources.&lt;Sink&gt;.&lt;someProperty&gt;</span> <span class="o">=</span> <span class="s">&lt;someValue&gt;</span>
+<span class="na">&lt;Agent&gt;.sinks.&lt;Sink&gt;.&lt;someProperty&gt;</span> <span class="o">=</span> <span class="s">&lt;someValue&gt;</span>
 </pre></div>
 </div>
 <p>The property &#8220;type&#8221; needs to be set for each component for Flume to understand
@@ -535,7 +614,7 @@ linked to form multiple flows:</p>
 </pre></div>
 </div>
 <p>Then you can link the sources and sinks to their corresponding channels (for
-sources) of channel (for sinks) to setup two different flows. For example, if
+sources) or channel (for sinks) to setup two different flows. For example, if
 you need to setup two flows in an agent, one going from an external avro client
 to external HDFS and another from output of a tail to avro sink, then here&#8217;s a
 config to do that:</p>
@@ -1544,7 +1623,7 @@ very late if new files keep coming in the spooling directory.</td>
 <td>The maximum time (in millis) to wait between consecutive attempts to
 write to the channel(s) if the channel is full. The source will start at
 a low backoff and increase it exponentially each time the channel throws a
-ChannelException, upto the value specified by this parameter.</td>
+ChannelException, up to the value specified by this parameter.</td>
 </tr>
 <tr class="row-odd"><td>batchSize</td>
 <td>100</td>
@@ -1574,7 +1653,7 @@ Defaults to parsing each line as an event. The class specified must implement
 </tr>
 <tr class="row-even"><td>bufferMaxLines</td>
 <td>&#8211;</td>
-<td>(Obselete) This option is now ignored.</td>
+<td>(Obsolete) This option is now ignored.</td>
 </tr>
 <tr class="row-odd"><td>bufferMaxLineLength</td>
 <td>5000</td>
@@ -1838,7 +1917,7 @@ Requires that the file system keeps track of modification times with at least a
 Use at your own risk.</p>
 </div>
 <p>Experimental source that connects via Streaming API to the 1% sample twitter
-firehose, continously downloads tweets, converts them to Avro format and
+firehose, continuously downloads tweets, converts them to Avro format and
 sends Avro events to a downstream Flume sink. Requires the consumer and
 access tokens and secrets of a Twitter developer account.
 Required properties are in <strong>bold</strong>.</p>
@@ -1940,7 +2019,7 @@ indicates that they are part of the same consumer group</td>
 </tr>
 <tr class="row-even"><td><strong>kafka.topics</strong></td>
 <td>&#8211;</td>
-<td>Comma-separated list of topics the kafka consumer will read messages from.</td>
+<td>Comma-separated list of topics the Kafka consumer will read messages from.</td>
 </tr>
 <tr class="row-odd"><td><strong>kafka.topics.regex</strong></td>
 <td>&#8211;</td>
@@ -2038,8 +2117,8 @@ and value.deserializer(org.apache.kafka.common.serialization.ByteArraySerializer
 </tr>
 <tr class="row-even"><td>zookeeperConnect</td>
 <td>&#8211;</td>
-<td>Is no longer supported by kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
-to establish connection with kafka cluster</td>
+<td>Is no longer supported by Kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
+to establish connection with Kafka cluster</td>
 </tr>
 <tr class="row-odd"><td>migrateZookeeperOffsets</td>
 <td>true</td>
@@ -2104,7 +2183,7 @@ security provider, cipher suites, enabled protocols, truststore or keystore type
 <span class="na">a1.sources.source1.kafka.consumer.ssl.truststore.password</span><span class="o">=</span><span class="s">&lt;password to access the truststore&gt;</span>
 </pre></div>
 </div>
-<p>Specyfing the truststore is optional here, the global truststore can be used instead.
+<p>Specifying the truststore is optional here, the global truststore can be used instead.
 For more details about the global SSL setup, see the <a class="reference internal" href="#ssl-tls-support">SSL/TLS support</a> section.</p>
 <p>Note: By default the property <tt class="docutils literal"><span class="pre">ssl.endpoint.identification.algorithm</span></tt>
 is not defined, so hostname verification is not performed.
@@ -3609,16 +3688,32 @@ fully-qualified class name of an implementation of the
 </tbody>
 </table>
 <p>Deprecated Properties</p>
-<p>Name                    Default       Description
-======================  ============  ======================================================================
-hdfs.callTimeout        30000         Number of milliseconds allowed for HDFS operations, such as open, write, flush, close. This number should be increased if many HDFS timeout operations are occurring.
-======================  ============  ======================================================================</p>
+<table border="1" class="docutils">
+<colgroup>
+<col width="18%" />
+<col width="10%" />
+<col width="72%" />
+</colgroup>
+<thead valign="bottom">
+<tr class="row-odd"><th class="head">Name</th>
+<th class="head">Default</th>
+<th class="head">Description</th>
+</tr>
+</thead>
+<tbody valign="top">
+<tr class="row-even"><td>hdfs.callTimeout</td>
+<td>30000</td>
+<td>Number of milliseconds allowed for HDFS operations, such as open, write, flush, close.
+This number should be increased if many HDFS timeout operations are occurring.</td>
+</tr>
+</tbody>
+</table>
 <p>Example for agent named a1:</p>
 <div class="highlight-properties"><div class="highlight"><pre><span class="na">a1.channels</span> <span class="o">=</span> <span class="s">c1</span>
 <span class="na">a1.sinks</span> <span class="o">=</span> <span class="s">k1</span>
 <span class="na">a1.sinks.k1.type</span> <span class="o">=</span> <span class="s">hdfs</span>
 <span class="na">a1.sinks.k1.channel</span> <span class="o">=</span> <span class="s">c1</span>
-<span class="na">a1.sinks.k1.hdfs.path</span> <span class="o">=</span> <span class="s">/flume/events/%y-%m-%d/%H%M/%S</span>
+<span class="na">a1.sinks.k1.hdfs.path</span> <span class="o">=</span> <span class="s">/flume/events/%Y-%m-%d/%H%M/%S</span>
 <span class="na">a1.sinks.k1.hdfs.filePrefix</span> <span class="o">=</span> <span class="s">events-</span>
 <span class="na">a1.sinks.k1.hdfs.round</span> <span class="o">=</span> <span class="s">true</span>
 <span class="na">a1.sinks.k1.hdfs.roundValue</span> <span class="o">=</span> <span class="s">10</span>
@@ -3871,7 +3966,7 @@ this automatically is to use the TimestampInterceptor.</p>
 <span class="na">a1.sinks.k1.hive.metastore</span> <span class="o">=</span> <span class="s">thrift://127.0.0.1:9083</span>
 <span class="na">a1.sinks.k1.hive.database</span> <span class="o">=</span> <span class="s">logsdb</span>
 <span class="na">a1.sinks.k1.hive.table</span> <span class="o">=</span> <span class="s">weblogs</span>
-<span class="na">a1.sinks.k1.hive.partition</span> <span class="o">=</span> <span class="s">asia,%{country},%y-%m-%d-%H-%M</span>
+<span class="na">a1.sinks.k1.hive.partition</span> <span class="o">=</span> <span class="s">asia,%{country},%Y-%m-%d-%H-%M</span>
 <span class="na">a1.sinks.k1.useLocalTimeStamp</span> <span class="o">=</span> <span class="s">false</span>
 <span class="na">a1.sinks.k1.round</span> <span class="o">=</span> <span class="s">true</span>
 <span class="na">a1.sinks.k1.roundValue</span> <span class="o">=</span> <span class="s">10</span>
@@ -4672,108 +4767,6 @@ will read this information from the first hbase-site.xml file in the classpath.<
 </pre></div>
 </div>
 </div>
-<div class="section" id="elasticsearchsink">
-<h4>ElasticSearchSink<a class="headerlink" href="#elasticsearchsink" title="Permalink to this headline">¶</a></h4>
-<p>This sink writes data to an elasticsearch cluster. By default, events will be written so that the <a class="reference external" href="http://kibana.org">Kibana</a> graphical interface
-can display them - just as if <a class="reference external" href="https://logstash.net">logstash</a> wrote them.</p>
-<p>The elasticsearch and lucene-core jars required for your environment must be placed in the lib directory of the Apache Flume installation.
-Elasticsearch requires that the major version of the client JAR match that of the server and that both are running the same minor version
-of the JVM. SerializationExceptions will appear if this is incorrect. To
-select the required version first determine the version of elasticsearch and the JVM version the target cluster is running. Then select an elasticsearch client
-library which matches the major version. A 0.19.x client can talk to a 0.19.x cluster; 0.20.x can talk to 0.20.x and 0.90.x can talk to 0.90.x. Once the
-elasticsearch version has been determined then read the pom.xml file to determine the correct lucene-core JAR version to use. The Flume agent
-which is running the ElasticSearchSink should also match the JVM the target cluster is running down to the minor version.</p>
-<p>Events will be written to a new index every day. The name will be &lt;indexName&gt;-yyyy-MM-dd where &lt;indexName&gt; is the indexName parameter. The sink
-will start writing to a new index at midnight UTC.</p>
-<p>Events are serialized for elasticsearch by the ElasticSearchLogStashEventSerializer by default. This behaviour can be
-overridden with the serializer parameter. This parameter accepts implementations of org.apache.flume.sink.elasticsearch.ElasticSearchEventSerializer
-or org.apache.flume.sink.elasticsearch.ElasticSearchIndexRequestBuilderFactory. Implementing ElasticSearchEventSerializer is deprecated in favour of
-the more powerful ElasticSearchIndexRequestBuilderFactory.</p>
-<p>The type is the FQCN: org.apache.flume.sink.elasticsearch.ElasticSearchSink</p>
-<p>Required properties are in <strong>bold</strong>.</p>
-<table border="1" class="docutils">
-<colgroup>
-<col width="8%" />
-<col width="37%" />
-<col width="54%" />
-</colgroup>
-<thead valign="bottom">
-<tr class="row-odd"><th class="head">Property Name</th>
-<th class="head">Default</th>
-<th class="head">Description</th>
-</tr>
-</thead>
-<tbody valign="top">
-<tr class="row-even"><td><strong>channel</strong></td>
-<td>&#8211;</td>
-<td>&nbsp;</td>
-</tr>
-<tr class="row-odd"><td><strong>type</strong></td>
-<td>&#8211;</td>
-<td>The component type name, needs to be <tt class="docutils literal"><span class="pre">org.apache.flume.sink.elasticsearch.ElasticSearchSink</span></tt></td>
-</tr>
-<tr class="row-even"><td><strong>hostNames</strong></td>
-<td>&#8211;</td>
-<td>Comma separated list of hostname:port, if the port is not present the default port &#8216;9300&#8217; will be used</td>
-</tr>
-<tr class="row-odd"><td>indexName</td>
-<td>flume</td>
-<td>The name of the index which the date will be appended to. Example &#8216;flume&#8217; -&gt; &#8216;flume-yyyy-MM-dd&#8217;
-Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header</td>
-</tr>
-<tr class="row-even"><td>indexType</td>
-<td>logs</td>
-<td>The type to index the document to, defaults to &#8216;log&#8217;
-Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header</td>
-</tr>
-<tr class="row-odd"><td>clusterName</td>
-<td>elasticsearch</td>
-<td>Name of the ElasticSearch cluster to connect to</td>
-</tr>
-<tr class="row-even"><td>batchSize</td>
-<td>100</td>
-<td>Number of events to be written per txn.</td>
-</tr>
-<tr class="row-odd"><td>ttl</td>
-<td>&#8211;</td>
-<td>TTL in days, when set will cause the expired documents to be deleted automatically,
-if not set documents will never be automatically deleted. TTL is accepted both in the earlier form of
-integer only e.g. a1.sinks.k1.ttl = 5 and also with a qualifier ms (millisecond), s (second), m (minute),
-h (hour), d (day) and w (week). Example a1.sinks.k1.ttl = 5d will set TTL to 5 days. Follow
-<a class="reference external" href="http://www.elasticsearch.org/guide/reference/mapping/ttl-field/">http://www.elasticsearch.org/guide/reference/mapping/ttl-field/</a> for more information.</td>
-</tr>
-<tr class="row-even"><td>serializer</td>
-<td>org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer</td>
-<td>The ElasticSearchIndexRequestBuilderFactory or ElasticSearchEventSerializer to use. Implementations of
-either class are accepted but ElasticSearchIndexRequestBuilderFactory is preferred.</td>
-</tr>
-<tr class="row-odd"><td>serializer.*</td>
-<td>&#8211;</td>
-<td>Properties to be passed to the serializer.</td>
-</tr>
-</tbody>
-</table>
-<div class="admonition note">
-<p class="first admonition-title">Note</p>
-<p class="last">Header substitution is a handy to use the value of an event header to dynamically decide the indexName and indexType to use when storing the event.
-Caution should be used in using this feature as the event submitter now has control of the indexName and indexType.
-Furthermore, if the elasticsearch REST client is used then the event submitter has control of the URL path used.</p>
-</div>
-<p>Example for agent named a1:</p>
-<div class="highlight-properties"><div class="highlight"><pre><span class="na">a1.channels</span> <span class="o">=</span> <span class="s">c1</span>
-<span class="na">a1.sinks</span> <span class="o">=</span> <span class="s">k1</span>
-<span class="na">a1.sinks.k1.type</span> <span class="o">=</span> <span class="s">elasticsearch</span>
-<span class="na">a1.sinks.k1.hostNames</span> <span class="o">=</span> <span class="s">127.0.0.1:9200,127.0.0.2:9300</span>
-<span class="na">a1.sinks.k1.indexName</span> <span class="o">=</span> <span class="s">foo_index</span>
-<span class="na">a1.sinks.k1.indexType</span> <span class="o">=</span> <span class="s">bar_type</span>
-<span class="na">a1.sinks.k1.clusterName</span> <span class="o">=</span> <span class="s">foobar_cluster</span>
-<span class="na">a1.sinks.k1.batchSize</span> <span class="o">=</span> <span class="s">500</span>
-<span class="na">a1.sinks.k1.ttl</span> <span class="o">=</span> <span class="s">5d</span>
-<span class="na">a1.sinks.k1.serializer</span> <span class="o">=</span> <span class="s">org.apache.flume.sink.elasticsearch.ElasticSearchDynamicSerializer</span>
-<span class="na">a1.sinks.k1.channel</span> <span class="o">=</span> <span class="s">c1</span>
-</pre></div>
-</div>
-</div>
 <div class="section" id="kite-dataset-sink">
 <h4>Kite Dataset Sink<a class="headerlink" href="#kite-dataset-sink" title="Permalink to this headline">¶</a></h4>
 <p>Experimental sink that writes events to a <a class="reference external" href="http://kitesdk.org/docs/current/guide/">Kite Dataset</a>.
@@ -6125,6 +6118,43 @@ Required properties are in <strong>bold</strong>.</p>
 simply ignored. Since c1 and c2 are not marked optional, failure to write to
 those channels will cause the transaction to fail.</p>
 </div>
+<div class="section" id="load-balancing-channel-selector">
+<h4>Load Balancing Channel Selector<a class="headerlink" href="#load-balancing-channel-selector" title="Permalink to this headline">¶</a></h4>
+<p>Load balancing channel selector provides the ability to load-balance flow over multiple channels. This
+effectively allows the incoming data to be processed on multiple threads. It maintains an indexed list of active channels on which the load must be distributed. Implementation supports distributing load using either round_robin or random selection mechanisms. The choice of selection mechanism defaults to round_robin type, but can be overridden via configuration.</p>
+<p>Required properties are in <strong>bold</strong>.</p>
+<table border="1" class="docutils">
+<colgroup>
+<col width="17%" />
+<col width="20%" />
+<col width="63%" />
+</colgroup>
+<thead valign="bottom">
+<tr class="row-odd"><th class="head">Property Name</th>
+<th class="head">Default</th>
+<th class="head">Description</th>
+</tr>
+</thead>
+<tbody valign="top">
+<tr class="row-even"><td>selector.type</td>
+<td>replicating</td>
+<td>The component type name, needs to be <tt class="docutils literal"><span class="pre">load_balancing</span></tt></td>
+</tr>
+<tr class="row-odd"><td>selector.policy</td>
+<td><tt class="docutils literal"><span class="pre">round_robin</span></tt></td>
+<td>Selection mechanism. Must be either <tt class="docutils literal"><span class="pre">round_robin</span></tt> or <tt class="docutils literal"><span class="pre">random</span></tt>.</td>
+</tr>
+</tbody>
+</table>
+<p>Example for agent named a1 and its source called r1:</p>
+<div class="highlight-properties"><div class="highlight"><pre><span class="na">a1.sources</span> <span class="o">=</span> <span class="s">r1</span>
+<span class="na">a1.channels</span> <span class="o">=</span> <span class="s">c1 c2 c3 c4</span>
+<span class="na">a1.sources.r1.channels</span> <span class="o">=</span> <span class="s">c1 c2 c3 c4</span>
+<span class="na">a1.sources.r1.selector.type</span> <span class="o">=</span> <span class="s">load_balancing</span>
+<span class="na">a1.sources.r1.selector.policy</span> <span class="o">=</span> <span class="s">round_robin</span>
+</pre></div>
+</div>
+</div>
 <div class="section" id="multiplexing-channel-selector">
 <h4>Multiplexing Channel Selector<a class="headerlink" href="#multiplexing-channel-selector" title="Permalink to this headline">¶</a></h4>
 <p>Required properties are in <strong>bold</strong>.</p>
@@ -6452,7 +6482,7 @@ Flume events in the Avro RPC mechanism.</p>
 <p>Example for agent named a1:</p>
 <div class="highlight-properties"><div class="highlight"><pre><span class="na">a1.sinks.k1.type</span> <span class="o">=</span> <span class="s">hdfs</span>
 <span class="na">a1.sinks.k1.channel</span> <span class="o">=</span> <span class="s">c1</span>
-<span class="na">a1.sinks.k1.hdfs.path</span> <span class="o">=</span> <span class="s">/flume/events/%y-%m-%d/%H%M/%S</span>
+<span class="na">a1.sinks.k1.hdfs.path</span> <span class="o">=</span> <span class="s">/flume/events/%Y-%m-%d/%H%M/%S</span>
 <span class="na">a1.sinks.k1.serializer</span> <span class="o">=</span> <span class="s">avro_event</span>
 <span class="na">a1.sinks.k1.serializer.compressionCodec</span> <span class="o">=</span> <span class="s">snappy</span>
 </pre></div>
@@ -7190,7 +7220,7 @@ Provider password can be set with the HADOOP_CREDSTORE_PASSWORD environment vari
 <h2>Log4J Appender<a class="headerlink" href="#log4j-appender" title="Permalink to this headline">¶</a></h2>
 <p>Appends Log4j events to a flume agent&#8217;s avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar).
+flume-ng-sdk-1.10.0.jar).
 Required properties are in <strong>bold</strong>.</p>
 <table border="1" class="docutils">
 <colgroup>
@@ -7270,7 +7300,7 @@ then the schema will be included as a Flume header.</p>
 <h2>Load Balancing Log4J Appender<a class="headerlink" href="#load-balancing-log4j-appender" title="Permalink to this headline">¶</a></h2>
 <p>Appends Log4j events to a list of flume agent&#8217;s avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar). This appender supports a round-robin and random
+flume-ng-sdk-1.10.0.jar). This appender supports a round-robin and random
 scheme for performing the load balancing. It also supports a configurable backoff
 timeout so that down agents are removed temporarily from the set of hosts
 Required properties are in <strong>bold</strong>.</p>
@@ -7648,109 +7678,39 @@ source code of the components.</p>
 </div>
 <div class="section" id="sinks-1">
 <h4>Sinks 1<a class="headerlink" href="#sinks-1" title="Permalink to this headline">¶</a></h4>
-<table border="1" class="docutils">
-<colgroup>
-<col width="30%" />
-<col width="16%" />
-<col width="15%" />
-<col width="19%" />
-<col width="9%" />
-<col width="10%" />
-</colgroup>
-<tbody valign="top">
-<tr class="row-odd"><td>&nbsp;</td>
-<td>Avro/Thrift</td>
-<td>AsyncHBase</td>
-<td>ElasticSearch</td>
-<td>HBase</td>
-<td>HBase2</td>
-</tr>
-<tr class="row-even"><td>BatchCompleteCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-odd"><td>BatchEmptyCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-even"><td>BatchUnderflowCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-odd"><td>ChannelReadFail</td>
-<td>x</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>x</td>
-</tr>
-<tr class="row-even"><td>ConnectionClosedCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-odd"><td>ConnectionCreatedCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-even"><td>ConnectionFailedCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-odd"><td>EventDrainAttemptCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-even"><td>EventDrainSuccessCount</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-<td>x</td>
-</tr>
-<tr class="row-odd"><td>EventWriteFail</td>
-<td>x</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>x</td>
-</tr>
-<tr class="row-even"><td>KafkaEventSendTimer</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-</tr>
-<tr class="row-odd"><td>RollbackCount</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-<td>&nbsp;</td>
-</tr>
-</tbody>
-</table>
+<div class="system-message">
+<p class="system-message-title">System Message: ERROR/3 (<tt class="docutils">/Users/rgoers/projects/apache/flume/flume-site/source/sphinx/FlumeUserGuide.rst</tt>, line 5070)</p>
+<p>Malformed table.</p>
+<div class="highlight-none"><div class="highlight"><pre>+------------------------+-------------+------------+-------+--------+
+|                        | Avro/Thrift | AsyncHBase | HBase | HBase2 |
++------------------------+-------------+------------+-------+--------+-
+| BatchCompleteCount     | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchEmptyCount        | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchUnderflowCount    | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ChannelReadFail        | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionClosedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionCreatedCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionFailedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainAttemptCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainSuccessCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventWriteFail         | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| KafkaEventSendTimer    |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
+| RollbackCount          |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
+</pre></div>
+</div>
+</div>
 </div>
 <div class="section" id="sinks-2">
 <h4>Sinks 2<a class="headerlink" href="#sinks-2" title="Permalink to this headline">¶</a></h4>
@@ -8557,98 +8517,94 @@ can be leveraged to move the Flume agent to another host.</p>
 <td>org.apache.flume.sink.hbase.AsyncHBaseSink</td>
 </tr>
 <tr class="row-even"><td>org.apache.flume.Sink</td>
-<td>elasticsearch</td>
-<td>org.apache.flume.sink.elasticsearch.ElasticSearchSink</td>
-</tr>
-<tr class="row-odd"><td>org.apache.flume.Sink</td>
 <td>file_roll</td>
 <td>org.apache.flume.sink.RollingFileSink</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.Sink</td>
+<tr class="row-odd"><td>org.apache.flume.Sink</td>
 <td>irc</td>
 <td>org.apache.flume.sink.irc.IRCSink</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.Sink</td>
+<tr class="row-even"><td>org.apache.flume.Sink</td>
 <td>thrift</td>
 <td>org.apache.flume.sink.ThriftSink</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.Sink</td>
+<tr class="row-odd"><td>org.apache.flume.Sink</td>
 <td>&#8211;</td>
 <td>org.example.MySink</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.ChannelSelector</td>
+<tr class="row-even"><td>org.apache.flume.ChannelSelector</td>
 <td>replicating</td>
 <td>org.apache.flume.channel.ReplicatingChannelSelector</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.ChannelSelector</td>
+<tr class="row-odd"><td>org.apache.flume.ChannelSelector</td>
 <td>multiplexing</td>
 <td>org.apache.flume.channel.MultiplexingChannelSelector</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.ChannelSelector</td>
+<tr class="row-even"><td>org.apache.flume.ChannelSelector</td>
 <td>&#8211;</td>
 <td>org.example.MyChannelSelector</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.SinkProcessor</td>
+<tr class="row-odd"><td>org.apache.flume.SinkProcessor</td>
 <td>default</td>
 <td>org.apache.flume.sink.DefaultSinkProcessor</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.SinkProcessor</td>
+<tr class="row-even"><td>org.apache.flume.SinkProcessor</td>
 <td>failover</td>
 <td>org.apache.flume.sink.FailoverSinkProcessor</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.SinkProcessor</td>
+<tr class="row-odd"><td>org.apache.flume.SinkProcessor</td>
 <td>load_balance</td>
 <td>org.apache.flume.sink.LoadBalancingSinkProcessor</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.SinkProcessor</td>
+<tr class="row-even"><td>org.apache.flume.SinkProcessor</td>
 <td>&#8211;</td>
 <td>&nbsp;</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.interceptor.Interceptor</td>
+<tr class="row-odd"><td>org.apache.flume.interceptor.Interceptor</td>
 <td>timestamp</td>
 <td>org.apache.flume.interceptor.TimestampInterceptor$Builder</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.interceptor.Interceptor</td>
+<tr class="row-even"><td>org.apache.flume.interceptor.Interceptor</td>
 <td>host</td>
 <td>org.apache.flume.interceptor.HostInterceptor$Builder</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.interceptor.Interceptor</td>
+<tr class="row-odd"><td>org.apache.flume.interceptor.Interceptor</td>
 <td>static</td>
 <td>org.apache.flume.interceptor.StaticInterceptor$Builder</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.interceptor.Interceptor</td>
+<tr class="row-even"><td>org.apache.flume.interceptor.Interceptor</td>
 <td>regex_filter</td>
 <td>org.apache.flume.interceptor.RegexFilteringInterceptor$Builder</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.interceptor.Interceptor</td>
+<tr class="row-odd"><td>org.apache.flume.interceptor.Interceptor</td>
 <td>regex_extractor</td>
 <td>org.apache.flume.interceptor.RegexFilteringInterceptor$Builder</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.channel.file.encryption.KeyProvider$Builder</td>
+<tr class="row-even"><td>org.apache.flume.channel.file.encryption.KeyProvider$Builder</td>
 <td>jceksfile</td>
 <td>org.apache.flume.channel.file.encryption.JCEFileKeyProvider</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.channel.file.encryption.KeyProvider$Builder</td>
+<tr class="row-odd"><td>org.apache.flume.channel.file.encryption.KeyProvider$Builder</td>
 <td>&#8211;</td>
 <td>org.example.MyKeyProvider</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.channel.file.encryption.CipherProvider</td>
+<tr class="row-even"><td>org.apache.flume.channel.file.encryption.CipherProvider</td>
 <td>aesctrnopadding</td>
 <td>org.apache.flume.channel.file.encryption.AESCTRNoPaddingProvider</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.channel.file.encryption.CipherProvider</td>
+<tr class="row-odd"><td>org.apache.flume.channel.file.encryption.CipherProvider</td>
 <td>&#8211;</td>
 <td>org.example.MyCipherProvider</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
+<tr class="row-even"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
 <td>text</td>
 <td>org.apache.flume.serialization.BodyTextEventSerializer$Builder</td>
 </tr>
-<tr class="row-even"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
+<tr class="row-odd"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
 <td>avro_event</td>
 <td>org.apache.flume.serialization.FlumeEventAvroEventSerializer$Builder</td>
 </tr>
-<tr class="row-odd"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
+<tr class="row-even"><td>org.apache.flume.serialization.EventSerializer$Builder</td>
 <td>&#8211;</td>
 <td>org.example.MyEventSerializer$Builder</td>
 </tr>
@@ -8741,7 +8697,7 @@ can be leveraged to move the Flume agent to another host.</p>
 
 <h3><a href="index.html">This Page</a></h3>
 <ul>
-<li><a class="reference internal" href="#">Flume 1.9.0 User Guide</a><ul>
+<li><a class="reference internal" href="#">Flume 1.10.0 User Guide</a><ul>
 <li><a class="reference internal" href="#introduction">Introduction</a><ul>
 <li><a class="reference internal" href="#overview">Overview</a></li>
 <li><a class="reference internal" href="#system-requirements">System Requirements</a></li>
@@ -8760,7 +8716,10 @@ can be leveraged to move the Flume agent to another host.</p>
 <li><a class="reference internal" href="#wiring-the-pieces-together">Wiring the pieces together</a></li>
 <li><a class="reference internal" href="#starting-an-agent">Starting an agent</a></li>
 <li><a class="reference internal" href="#a-simple-example">A simple example</a></li>
-<li><a class="reference internal" href="#using-environment-variables-in-configuration-files">Using environment variables in configuration files</a></li>
+<li><a class="reference internal" href="#configuration-from-uris">Configuration from URIs</a></li>
+<li><a class="reference internal" href="#multiple-configuration-files">Multiple Configuration Files</a></li>
+<li><a class="reference internal" href="#using-environment-variables-system-properies-or-other-properties-configuration-files">Using environment variables, system properties, or other properties configuration files</a></li>
+<li><a class="reference internal" href="#using-a-command-options-file">Using a command options file</a></li>
 <li><a class="reference internal" href="#logging-raw-data">Logging raw data</a></li>
 <li><a class="reference internal" href="#zookeeper-based-configuration">Zookeeper based Configuration</a></li>
 <li><a class="reference internal" href="#installing-third-party-plugins">Installing third-party plugins</a><ul>
@@ -8850,7 +8809,6 @@ can be leveraged to move the Flume agent to another host.</p>
 </ul>
 </li>
 <li><a class="reference internal" href="#morphlinesolrsink">MorphlineSolrSink</a></li>
-<li><a class="reference internal" href="#elasticsearchsink">ElasticSearchSink</a></li>
 <li><a class="reference internal" href="#kite-dataset-sink">Kite Dataset Sink</a></li>
 <li><a class="reference internal" href="#kafka-sink">Kafka Sink</a></li>
 <li><a class="reference internal" href="#http-sink">HTTP Sink</a></li>
@@ -8869,6 +8827,7 @@ can be leveraged to move the Flume agent to another host.</p>
 </li>
 <li><a class="reference internal" href="#flume-channel-selectors">Flume Channel Selectors</a><ul>
 <li><a class="reference internal" href="#replicating-channel-selector-default">Replicating Channel Selector (default)</a></li>
+<li><a class="reference internal" href="#load-balancing-channel-selector">Load Balancing Channel Selector</a></li>
 <li><a class="reference internal" href="#multiplexing-channel-selector">Multiplexing Channel Selector</a></li>
 <li><a class="reference internal" href="#custom-channel-selector">Custom Channel Selector</a></li>
 </ul>
@@ -8977,7 +8936,7 @@ can be leveraged to move the Flume agent to another host.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/_sources/FlumeDeveloperGuide.txt b/content/_sources/FlumeDeveloperGuide.txt
index 5e98bd3..f383cda 100644
--- a/content/_sources/FlumeDeveloperGuide.txt
+++ b/content/_sources/FlumeDeveloperGuide.txt
@@ -15,7 +15,7 @@
 
 
 ======================================
-Flume 1.9.0 Developer Guide
+Flume 1.10.0 Developer Guide
 ======================================
 
 Introduction
@@ -866,3 +866,93 @@ Channel
 ~~~~~~~
 
 TBD
+
+Initializable
+~~~~~~~~~~~~~
+
+As of Flume 1.10.0 Sources, Sinks, and Channels may implement the Initializable interface. Doing so
+allows the component to have access to the materialized configuration before any of the components have been
+started.
+
+This example shows a Sink being configured with the name of a Source. While initializing it will
+retrieve the Source from the configuration and save it. During event processing a new event will be
+sent to the Source, presumably after the event has been modified in some way.
+
+.. code-block:: java
+
+  public class NullInitSink extends NullSink implements Initializable {
+
+    private static final Logger logger = LoggerFactory.getLogger(NullInitSink.class);
+    private String sourceName = null;
+    private EventProcessor eventProcessor = null;
+    private long total = 0;
+
+    public NullInitSink() {
+      super();
+    }
+
+    @Override
+    public void configure(Context context) {
+      sourceName = context.getString("targetSource");
+      super.configure(context);
+
+    }
+
+    @Override
+    public void initialize(MaterializedConfiguration configuration) {
+      logger.debug("Locating source for event publishing");
+      for (Map.Entry<String, SourceRunner>  entry : configuration.getSourceRunners().entrySet()) {
+        if (entry.getKey().equals(sourceName)) {
+          Source source = entry.getValue().getSource();
+          if (source instanceof EventProcessor) {
+            eventProcessor = (EventProcessor) source;
+            logger.debug("Found event processor {}", source.getName());
+            return;
+          }
+        }
+      }
+      logger.warn("No Source named {} found for republishing events.", sourceName);
+    }
+
+    @Override
+    public Status process() throws EventDeliveryException {
+      Status status = Status.READY;
+
+      Channel channel = getChannel();
+      Transaction transaction = channel.getTransaction();
+      Event event = null;
+      CounterGroup counterGroup = getCounterGroup();
+      long batchSize = getBatchSize();
+      long eventCounter = counterGroup.get("events.success");
+
+      try {
+        transaction.begin();
+        int i = 0;
+        for (i = 0; i < batchSize; i++) {
+          event = channel.take();
+          if (event != null) {
+            long id = Long.parseLong(new String(event.getBody()));
+            total += id;
+            event.getHeaders().put("Total", Long.toString(total));
+            eventProcessor.processEvent(event);
+            logger.info("Null sink {} successful processed event {}", getName(), id);
+          } else {
+            status = Status.BACKOFF;
+            break;
+          }
+        }
+        transaction.commit();
+        counterGroup.addAndGet("events.success", (long) Math.min(batchSize, i));
+        counterGroup.incrementAndGet("transaction.success");
+      } catch (Exception ex) {
+        transaction.rollback();
+        counterGroup.incrementAndGet("transaction.failed");
+        logger.error("Failed to deliver event. Exception follows.", ex);
+        throw new EventDeliveryException("Failed to deliver event: " + event, ex);
+      } finally {
+        transaction.close();
+      }
+
+      return status;
+    }
+  }
\ No newline at end of file
diff --git a/content/_sources/FlumeUserGuide.txt b/content/_sources/FlumeUserGuide.txt
index b740507..7d7b3fd 100644
--- a/content/_sources/FlumeUserGuide.txt
+++ b/content/_sources/FlumeUserGuide.txt
@@ -14,9 +14,9 @@
    limitations under the License.
 
 
-===============================
-Flume 1.9.0 User Guide
-===============================
+================================
+Flume 1.10.0 User Guide
+================================
 
 Introduction
 ============
@@ -109,18 +109,20 @@ There's also a memory channel which simply stores the events in an in-memory
 queue, which is faster but any events still left in the memory channel when an
 agent process dies can't be recovered.
 
+Flume's `KafkaChannel` uses Apache Kafka to stage events. Using a replicated
+Kafka topic as a channel helps avoiding event loss in case of a disk failure.
+
 Setup
 =====
 
 Setting up an agent
 -------------------
 
-Flume agent configuration is stored in a local configuration file.  This is a
-text file that follows the Java properties file format.
-Configurations for one or more agents can be specified in the same
-configuration file. The configuration file includes properties of each source,
-sink and channel in an agent and how they are wired together to form data
-flows.
+Flume agent configuration is stored in one or more configuration files that
+follow the Java properties file format. Configurations for one or more agents
+can be specified in these configuration files. The configuration includes
+properties of each source, sink and channel in an agent and how they are wired
+together to form data flows.
 
 Configuring individual components
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -222,30 +224,110 @@ The original Flume terminal will output the event in a log message.
 
 Congratulations - you've successfully configured and deployed a Flume agent! Subsequent sections cover agent configuration in much more detail.
 
-Using environment variables in configuration files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Configuration from URIs
+~~~~~~~~~~~~~~~~~~~~~~~
+As of version 1.10.0 Flume supports being configured using URIs instead of just from local files. Direct support
+for HTTP(S), file, and classpath URIs is included. The HTTP support includes support for authentication using
+basic authorization but other authorization mechanisms may be supported by specifying the fully qualified name
+of the class that implements the AuthorizationProvider interface using the --auth-provider option. HTTP also
+supports reloading of configuration files using polling if the target server properly responds to the If-Modified-Since
+header.
+
+To specify credentials for HTTP authentication add::
+
+  --conf-user userid --conf-password password
+
+to the startup command.
+
+Multiple Configuration Files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+As of version 1.10.0 Flume supports being configured from multiple configuration files instead of just one.
+This more easily allows values to be overridden or added based on specific environments. Each file should
+be configured using its own --conf-file or --conf-uri option. However, all files should either be provided
+with --conf-file or with --conf-uri. If --conf-file and --conf-uri appear together as options all --conf-uri
+configurations will be processed before any of the --conf-file configurations are merged.
+
+For example, a configuration of::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --conf-uri http://localhost:80/flume.conf --conf-uri http://localhost:80/override.conf --name a1 -Dflume.root.logger=INFO,console
+
+will cause flume.conf to be read first, override.conf to be merged with it and finally example.conf would be
+merged last. If it is desired to have example.conf be the base configuration it should be specified using the
+--conf-uri option either as::
+
+  --conf-uri classpath://example.conf
+  or
+  --conf-uri file:///example.conf
+
+depending on how it should be accessed.
+
+Using environment variables, system properties, or other properties configuration files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Flume has the ability to substitute environment variables in the configuration. For example::
 
   a1.sources = r1
   a1.sources.r1.type = netcat
   a1.sources.r1.bind = 0.0.0.0
-  a1.sources.r1.port = ${NC_PORT}
+  a1.sources.r1.port = ${env:NC_PORT}
   a1.sources.r1.channels = c1
 
 NB: it currently works for values only, not for keys. (Ie. only on the "right side" of the `=` mark of the config lines.)
 
-This can be enabled via Java system properties on agent invocation by setting `propertiesImplementation = org.apache.flume.node.EnvVarResolverProperties`.
+As of version 1.10.0 Flume resolves configuration values using Apache Commons Text's StringSubstitutor
+class using the default set of Lookups along with a lookup that uses the configuration files as a
+source for replacement values.
 
 For example::
-  $ NC_PORT=44444 bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console -DpropertiesImplementation=org.apache.flume.node.EnvVarResolverProperties
+  $ NC_PORT=44444 bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console
 
 Note the above is just an example, environment variables can be configured in other ways, including being set in `conf/flume-env.sh`.
 
+As noted, system properties are also supported, so the configuration::
+
+  a1.sources = r1
+  a1.sources.r1.type = netcat
+  a1.sources.r1.bind = 0.0.0.0
+  a1.sources.r1.port = ${sys:NC_PORT}
+  a1.sources.r1.channels = c1
+
+could be used and the startup command could be::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console -DNC_PORT=44444
+
+Furthermore, because multiple configuration files are allowed the first file could contain::
+
+  a1.sources = r1
+  a1.sources.r1.type = netcat
+  a1.sources.r1.bind = 0.0.0.0
+  a1.sources.r1.port = ${NC_PORT}
+  a1.sources.r1.channels = c1
+
+and the override file could contain::
+
+  NC_PORT = 44444
+
+In this case the startup command could be::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --conf-file override.conf --name a1 -Dflume.root.logger=INFO,console
+
+Note that the method for specifying environment variables as was done in prior versions will still work
+but has been deprecated in favor of using ${env:varName}.
+
+Using a command options file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Instead of specifying all the command options on the command line, as of version 1.10.0 command
+options may be placed in either /etc/flume/flume.opts or flume.opts on the classpath. An example
+might be::
+
+  conf-file = example.conf
+  conf-file = override.conf
+  name = a1
+
 Logging raw data
 ~~~~~~~~~~~~~~~~
 
 
-Logging the raw stream of data flowing through the ingest pipeline is not desired behaviour in
+Logging the raw stream of data flowing through the ingest pipeline is not desired behavior in
 many production environments because this may result in leaking sensitive data or security related
 configurations, such as secret keys, to Flume log files.
 By default, Flume will not log such information. On the other hand, if the data pipeline is broken,
@@ -487,10 +569,10 @@ component:
   <Agent>.sources.<Source>.<someProperty> = <someValue>
 
   # properties for channels
-  <Agent>.channel.<Channel>.<someProperty> = <someValue>
+  <Agent>.channels.<Channel>.<someProperty> = <someValue>
 
   # properties for sinks
-  <Agent>.sources.<Sink>.<someProperty> = <someValue>
+  <Agent>.sinks.<Sink>.<someProperty> = <someValue>
 
 The property "type" needs to be set for each component for Flume to understand
 what kind of object it needs to be. Each source, sink and channel type has its
@@ -539,7 +621,7 @@ linked to form multiple flows:
   <Agent>.channels = <Channel1> <Channel2>
 
 Then you can link the sources and sinks to their corresponding channels (for
-sources) of channel (for sinks) to setup two different flows. For example, if
+sources) or channel (for sinks) to setup two different flows. For example, if
 you need to setup two flows in an agent, one going from an external avro client
 to external HDFS and another from output of a tail to avro sink, then here's a
 config to do that:
@@ -1248,7 +1330,7 @@ recursiveDirectorySearch  false           Whether to monitor sub directories for
 maxBackoff                4000            The maximum time (in millis) to wait between consecutive attempts to
                                           write to the channel(s) if the channel is full. The source will start at
                                           a low backoff and increase it exponentially each time the channel throws a
-                                          ChannelException, upto the value specified by this parameter.
+                                          ChannelException, up to the value specified by this parameter.
 batchSize                 100             Granularity at which to batch transfer to the channel
 inputCharset              UTF-8           Character set used by deserializers that treat the input file as text.
 decodeErrorPolicy         ``FAIL``        What to do when we see a non-decodable character in the input file.
@@ -1260,7 +1342,7 @@ deserializer              ``LINE``        Specify the deserializer used to parse
                                           Defaults to parsing each line as an event. The class specified must implement
                                           ``EventDeserializer.Builder``.
 deserializer.*                            Varies per event deserializer.
-bufferMaxLines            --              (Obselete) This option is now ignored.
+bufferMaxLines            --              (Obsolete) This option is now ignored.
 bufferMaxLineLength       5000            (Deprecated) Maximum length of a line in the commit buffer. Use deserializer.maxLineLength instead.
 selector.type             replicating     replicating or multiplexing
 selector.*                                Depends on the selector.type value
@@ -1412,7 +1494,7 @@ Twitter 1% firehose Source (experimental)
   Use at your own risk.
 
 Experimental source that connects via Streaming API to the 1% sample twitter
-firehose, continously downloads tweets, converts them to Avro format and
+firehose, continuously downloads tweets, converts them to Avro format and
 sends Avro events to a downstream Flume sink. Requires the consumer and
 access tokens and secrets of a Twitter developer account.
 Required properties are in **bold**.
@@ -1460,7 +1542,7 @@ Property Name                       Default      Description
 **kafka.bootstrap.servers**         --           List of brokers in the Kafka cluster used by the source
 kafka.consumer.group.id             flume        Unique identified of consumer group. Setting the same id in multiple sources or agents
                                                  indicates that they are part of the same consumer group
-**kafka.topics**                    --           Comma-separated list of topics the kafka consumer will read messages from.
+**kafka.topics**                    --           Comma-separated list of topics the Kafka consumer will read messages from.
 **kafka.topics.regex**              --           Regex that defines set of topics the source is subscribed on. This property has higher priority
                                                  than ``kafka.topics`` and overrides ``kafka.topics`` if exists.
 batchSize                           1000         Maximum number of messages written to Channel in one batch
@@ -1505,8 +1587,8 @@ Property Name                    Default              Description
 ===============================  ===================  ================================================================================================
 topic                            --                   Use kafka.topics
 groupId                          flume                Use kafka.consumer.group.id
-zookeeperConnect                 --                   Is no longer supported by kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
-                                                      to establish connection with kafka cluster
+zookeeperConnect                 --                   Is no longer supported by Kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
+                                                      to establish connection with Kafka cluster
 migrateZookeeperOffsets          true                 When no Kafka stored offset is found, look up the offsets in Zookeeper and commit them to Kafka.
                                                       This should be true to support seamless Kafka client migration from older versions of Flume.
                                                       Once migrated this can be set to false, though that should generally not be required.
@@ -1579,7 +1661,7 @@ Example configuration with server side authentication and data encryption.
     a1.sources.source1.kafka.consumer.ssl.truststore.location=/path/to/truststore.jks
     a1.sources.source1.kafka.consumer.ssl.truststore.password=<password to access the truststore>
 
-Specyfing the truststore is optional here, the global truststore can be used instead.
+Specifying the truststore is optional here, the global truststore can be used instead.
 For more details about the global SSL setup, see the `SSL/TLS support`_ section.
 
 Note: By default the property ``ssl.endpoint.identification.algorithm``
@@ -2416,10 +2498,12 @@ serializer.*
 
 Deprecated Properties
 
+======================  ============  ======================================================================================
 Name                    Default       Description
-======================  ============  ======================================================================
-hdfs.callTimeout        30000         Number of milliseconds allowed for HDFS operations, such as open, write, flush, close. This number should be increased if many HDFS timeout operations are occurring.
-======================  ============  ======================================================================
+======================  ============  ======================================================================================
+hdfs.callTimeout        30000         Number of milliseconds allowed for HDFS operations, such as open, write, flush, close.
+                                      This number should be increased if many HDFS timeout operations are occurring.
+======================  ============  ======================================================================================
 
 Example for agent named a1:
 
@@ -2429,7 +2513,7 @@ Example for agent named a1:
   a1.sinks = k1
   a1.sinks.k1.type = hdfs
   a1.sinks.k1.channel = c1
-  a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/%H%M/%S
+  a1.sinks.k1.hdfs.path = /flume/events/%Y-%m-%d/%H%M/%S
   a1.sinks.k1.hdfs.filePrefix = events-
   a1.sinks.k1.hdfs.round = true
   a1.sinks.k1.hdfs.roundValue = 10
@@ -2564,7 +2648,7 @@ Example for agent named a1:
  a1.sinks.k1.hive.metastore = thrift://127.0.0.1:9083
  a1.sinks.k1.hive.database = logsdb
  a1.sinks.k1.hive.table = weblogs
- a1.sinks.k1.hive.partition = asia,%{country},%y-%m-%d-%H-%M
+ a1.sinks.k1.hive.partition = asia,%{country},%Y-%m-%d-%H-%M
  a1.sinks.k1.useLocalTimeStamp = false
  a1.sinks.k1.round = true
  a1.sinks.k1.roundValue = 10
@@ -2997,74 +3081,6 @@ Example for agent named a1:
   # a1.sinks.k1.batchSize = 1000
   # a1.sinks.k1.batchDurationMillis = 1000
 
-ElasticSearchSink
-~~~~~~~~~~~~~~~~~
-
-This sink writes data to an elasticsearch cluster. By default, events will be written so that the `Kibana <http://kibana.org>`_ graphical interface
-can display them - just as if `logstash <https://logstash.net>`_ wrote them.
-
-The elasticsearch and lucene-core jars required for your environment must be placed in the lib directory of the Apache Flume installation.
-Elasticsearch requires that the major version of the client JAR match that of the server and that both are running the same minor version
-of the JVM. SerializationExceptions will appear if this is incorrect. To
-select the required version first determine the version of elasticsearch and the JVM version the target cluster is running. Then select an elasticsearch client
-library which matches the major version. A 0.19.x client can talk to a 0.19.x cluster; 0.20.x can talk to 0.20.x and 0.90.x can talk to 0.90.x. Once the
-elasticsearch version has been determined then read the pom.xml file to determine the correct lucene-core JAR version to use. The Flume agent
-which is running the ElasticSearchSink should also match the JVM the target cluster is running down to the minor version.
-
-Events will be written to a new index every day. The name will be <indexName>-yyyy-MM-dd where <indexName> is the indexName parameter. The sink
-will start writing to a new index at midnight UTC.
-
-Events are serialized for elasticsearch by the ElasticSearchLogStashEventSerializer by default. This behaviour can be
-overridden with the serializer parameter. This parameter accepts implementations of org.apache.flume.sink.elasticsearch.ElasticSearchEventSerializer
-or org.apache.flume.sink.elasticsearch.ElasticSearchIndexRequestBuilderFactory. Implementing ElasticSearchEventSerializer is deprecated in favour of
-the more powerful ElasticSearchIndexRequestBuilderFactory.
-
-The type is the FQCN: org.apache.flume.sink.elasticsearch.ElasticSearchSink
-
-Required properties are in **bold**.
-
-================  ======================================================================== =======================================================================================================
-Property Name     Default                                                                  Description
-================  ======================================================================== =======================================================================================================
-**channel**       --
-**type**          --                                                                       The component type name, needs to be ``org.apache.flume.sink.elasticsearch.ElasticSearchSink``
-**hostNames**     --                                                                       Comma separated list of hostname:port, if the port is not present the default port '9300' will be used
-indexName         flume                                                                    The name of the index which the date will be appended to. Example 'flume' -> 'flume-yyyy-MM-dd'
-                                                                                           Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header
-indexType         logs                                                                     The type to index the document to, defaults to 'log'
-                                                                                           Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header
-clusterName       elasticsearch                                                            Name of the ElasticSearch cluster to connect to
-batchSize         100                                                                      Number of events to be written per txn.
-ttl               --                                                                       TTL in days, when set will cause the expired documents to be deleted automatically,
-                                                                                           if not set documents will never be automatically deleted. TTL is accepted both in the earlier form of
-                                                                                           integer only e.g. a1.sinks.k1.ttl = 5 and also with a qualifier ms (millisecond), s (second), m (minute),
-                                                                                           h (hour), d (day) and w (week). Example a1.sinks.k1.ttl = 5d will set TTL to 5 days. Follow
-                                                                                           http://www.elasticsearch.org/guide/reference/mapping/ttl-field/ for more information.
-serializer        org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer The ElasticSearchIndexRequestBuilderFactory or ElasticSearchEventSerializer to use. Implementations of
-                                                                                           either class are accepted but ElasticSearchIndexRequestBuilderFactory is preferred.
-serializer.*      --                                                                       Properties to be passed to the serializer.
-================  ======================================================================== =======================================================================================================
-
-.. note:: Header substitution is a handy to use the value of an event header to dynamically decide the indexName and indexType to use when storing the event.
-          Caution should be used in using this feature as the event submitter now has control of the indexName and indexType.
-          Furthermore, if the elasticsearch REST client is used then the event submitter has control of the URL path used.
-
-Example for agent named a1:
-
-.. code-block:: properties
-
-  a1.channels = c1
-  a1.sinks = k1
-  a1.sinks.k1.type = elasticsearch
-  a1.sinks.k1.hostNames = 127.0.0.1:9200,127.0.0.2:9300
-  a1.sinks.k1.indexName = foo_index
-  a1.sinks.k1.indexType = bar_type
-  a1.sinks.k1.clusterName = foobar_cluster
-  a1.sinks.k1.batchSize = 500
-  a1.sinks.k1.ttl = 5d
-  a1.sinks.k1.serializer = org.apache.flume.sink.elasticsearch.ElasticSearchDynamicSerializer
-  a1.sinks.k1.channel = c1
-
 Kite Dataset Sink
 ~~~~~~~~~~~~~~~~~
 
@@ -4037,6 +4053,29 @@ In the above configuration, c3 is an optional channel. Failure to write to c3 is
 simply ignored. Since c1 and c2 are not marked optional, failure to write to
 those channels will cause the transaction to fail.
 
+Load Balancing Channel Selector
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Load balancing channel selector provides the ability to load-balance flow over multiple channels. This
+effectively allows the incoming data to be processed on multiple threads. It maintains an indexed list of active channels on which the load must be distributed. Implementation supports distributing load using either via round_robin or random selection mechanisms. The choice of selection mechanism defaults to round_robin type, but can be overridden via configuration.
+
+Required properties are in **bold**.
+
+==================  =====================  =================================================
+Property Name       Default                Description
+==================  =====================  =================================================
+selector.type       replicating            The component type name, needs to be ``load_balancing``
+selector.policy     ``round_robin``        Selection mechanism. Must be either ``round_robin`` or ``random``.
+==================  =====================  =================================================
+
+Example for agent named a1 and its source called r1:
+
+.. code-block:: properties
+
+  a1.sources = r1
+  a1.channels = c1 c2 c3 c4
+  a1.sources.r1.channels = c1 c2 c3 c4
+  a1.sources.r1.selector.type = load_balancing
+  a1.sources.r1.selector.policy = round_robin
 
 Multiplexing Channel Selector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -4279,7 +4318,7 @@ Example for agent named a1:
 
   a1.sinks.k1.type = hdfs
   a1.sinks.k1.channel = c1
-  a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/%H%M/%S
+  a1.sinks.k1.hdfs.path = /flume/events/%Y-%m-%d/%H%M/%S
   a1.sinks.k1.serializer = avro_event
   a1.sinks.k1.serializer.compressionCodec = snappy
 
@@ -4805,7 +4844,7 @@ Log4J Appender
 
 Appends Log4j events to a flume agent's avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar).
+flume-ng-sdk-1.10.0.jar).
 Required properties are in **bold**.
 
 =====================  =======  ==================================================================================
@@ -4869,7 +4908,7 @@ Load Balancing Log4J Appender
 
 Appends Log4j events to a list of flume agent's avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar). This appender supports a round-robin and random
+flume-ng-sdk-1.10.0.jar). This appender supports a round-robin and random
 scheme for performing the load balancing. It also supports a configurable backoff
 timeout so that down agents are removed temporarily from the set of hosts
 Required properties are in **bold**.
@@ -5028,33 +5067,33 @@ Sources 2
 Sinks 1
 ~~~~~~~
 
-+------------------------+-------------+------------+---------------+-------+--------+
-|                        | Avro/Thrift | AsyncHBase | ElasticSearch | HBase | HBase2 |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchCompleteCount     | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchEmptyCount        | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchUnderflowCount    | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ChannelReadFail        | x           |            |               |       | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionClosedCount  | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionCreatedCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionFailedCount  | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventDrainAttemptCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventDrainSuccessCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventWriteFail         | x           |            |               |       | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| KafkaEventSendTimer    |             |            |               |       |        |
-+------------------------+-------------+------------+---------------+-------+--------+
-| RollbackCount          |             |            |               |       |        |
-+------------------------+-------------+------------+---------------+-------+--------+
++------------------------+-------------+------------+-------+--------+
+|                        | Avro/Thrift | AsyncHBase | HBase | HBase2 |
++------------------------+-------------+------------+-------+--------+
+| BatchCompleteCount     | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchEmptyCount        | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchUnderflowCount    | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ChannelReadFail        | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionClosedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionCreatedCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionFailedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainAttemptCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainSuccessCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventWriteFail         | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| KafkaEventSendTimer    |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
+| RollbackCount          |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
 
 Sinks 2
 ~~~~~~~
@@ -5540,7 +5579,6 @@ org.apache.flume.Sink                                         hdfs
 org.apache.flume.Sink                                         hbase                   org.apache.flume.sink.hbase.HBaseSink
 org.apache.flume.Sink                                         hbase2                  org.apache.flume.sink.hbase2.HBase2Sink
 org.apache.flume.Sink                                         asynchbase              org.apache.flume.sink.hbase.AsyncHBaseSink
-org.apache.flume.Sink                                         elasticsearch           org.apache.flume.sink.elasticsearch.ElasticSearchSink
 org.apache.flume.Sink                                         file_roll               org.apache.flume.sink.RollingFileSink
 org.apache.flume.Sink                                         irc                     org.apache.flume.sink.irc.IRCSink
 org.apache.flume.Sink                                         thrift                  org.apache.flume.sink.ThriftSink
diff --git a/content/_sources/download.txt b/content/_sources/download.txt
index 2cffa8d..b72626a 100644
--- a/content/_sources/download.txt
+++ b/content/_sources/download.txt
@@ -10,8 +10,8 @@ originals on the main distribution server.
 
 .. csv-table::
 
-   "Apache Flume binary (tar.gz)",  `apache-flume-1.9.0-bin.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz>`_, `apache-flume-1.9.0-bin.tar.gz.sha512 <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.sha512>`_, `apache-flume-1.9.0-bin.tar.gz.asc <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.asc>`_
-  "Apache Flume source (tar.gz)",  `apache-flume-1.9.0-src.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-src.tar.gz>`_, `apache-flume-1.9.0-src.tar.gz.sha512 <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.sha512>`_, `apache-flume-1.9.0-src.tar.gz.asc <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.asc>`_
+   "Apache Flume binary (tar.gz)",  `apache-flume-1.10.0-bin.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz>`_, `apache-flume-1.10.0-bin.tar.gz.sha512 <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.sha512>`_, `apache-flume-1.10.0-bin.tar.gz.asc <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.asc>`_
+  "Apache Flume source (tar.gz)",  `apache-flume-1.10.0-src.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-src.tar.gz>`_, `apache-flume-1.10.0-src.tar.gz.sha512 <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.sha512>`_, `apache-flume-1.10.0-src.tar.gz.asc <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.asc>`_
 
 It is essential that you verify the integrity of the downloaded files using the PGP or MD5 signatures. Please read
 `Verifying Apache HTTP Server Releases <http://httpd.apache.org/dev/verification.html>`_ for more information on
@@ -23,9 +23,9 @@ as well as the asc signature file for the relevant distribution. Make sure you g
 Then verify the signatures using::
 
     % gpg --import KEYS
-    % gpg --verify apache-flume-1.9.0-src.tar.gz.asc
+    % gpg --verify apache-flume-1.10.0-src.tar.gz.asc
 
-Apache Flume 1.9.0 is signed by Ferenc Szabo 79E8E648
+Apache Flume 1.10.0 is signed by Ralph Goers B3D8E1BA
 
 Alternatively, you can verify the MD5 or SHA1 signatures of the files. A program called md5, md5sum, or shasum is included in many
 Unix distributions for this purpose.
diff --git a/content/_sources/index.txt b/content/_sources/index.txt
index b574411..a750734 100644
--- a/content/_sources/index.txt
+++ b/content/_sources/index.txt
@@ -31,6 +31,34 @@ application.
 
 .. rubric:: News
 
+.. raw:: html
+
+   <h3>June 5, 2022 - Apache Flume 1.10.0 Released</h3>
+
+The Apache Flume team is pleased to announce the release of Flume 1.10.0.
+
+Flume is a distributed, reliable, and available service for efficiently
+collecting, aggregating, and moving large amounts of streaming event data.
+
+Flume 1.10.0 is stable, production-ready software, and is backwards-compatible with
+previous versions of the Flume 1.x codeline.
+
+This version of Flume upgrades many dependencies, resolving the CVEs associated with them.
+Enhancements included in this release include the addition of a LoadBalancingChannelSelector,
+the ability to retrieve the Flume configuration from a remote source such as a Spring
+Cloud Config Server, and support for composite configurations.
+
+Flume has been updated to use Log4j 2.x instead of Log4j 1.x.
+
+The full change log and documentation are available on the
+`Flume 1.10.0 release page <releases/1.10.0.html>`__.
+
+This release can be downloaded from the Flume `Download <download.html>`__ page.
+
+Your contributions, feedback, help and support make Flume better!
+For more information on how to report problems or contribute,
+please visit our `Get Involved <getinvolved.html>`__ page.
+
 .. raw:: html
 
    <h3>January 8, 2019 - Apache Flume 1.9.0 Released</h3>
diff --git a/content/_sources/releases/1.10.0.txt b/content/_sources/releases/1.10.0.txt
new file mode 100644
index 0000000..681b5b1
--- /dev/null
+++ b/content/_sources/releases/1.10.0.txt
@@ -0,0 +1,69 @@
+===============
+Version 1.10.0
+===============
+
+.. rubric:: Status of this release
+
+Apache Flume 1.10.0 is the twelfth release of Flume as an Apache top-level project
+(TLP). Apache Flume 1.10.0 is production-ready software.
+
+.. rubric:: Release Documentation
+
+* `Flume 1.10.0 User Guide <content/1.10.0/FlumeUserGuide.html>`__ (also in `pdf <content/1.10.0/FlumeUserGuide.pdf>`__)
+* `Flume 1.10.0 Developer Guide <content/1.10.0/FlumeDeveloperGuide.html>`__ (also in `pdf <content/1.10.0/FlumeDeveloperGuide.pdf>`__)
+* `Flume 1.10.0 API Documentation <content/1.10.0/apidocs/index.html>`__
+
+.. rubric:: Changes
+
+Release Notes - Flume - Version v1.10.0
+
+** Bug
+    * [`FLUME-3151 <https://issues.apache.org/jira/browse/FLUME-3151>`__] - Upgrade Hadoop to 2.10.1 
+    * [`FLUME-3311 <https://issues.apache.org/jira/browse/FLUME-3311>`__] - Update Wrong Use In HDFS Sink 
+    * [`FLUME-3316 <https://issues.apache.org/jira/browse/FLUME-3316>`__] - Syslog Rfc3164Date test fails when the test date falls on a leap day 
+    * [`FLUME-3328 <https://issues.apache.org/jira/browse/FLUME-3328>`__] - Fix Deprecated Properties table of HDFS Sink 
+    * [`FLUME-3356 <https://issues.apache.org/jira/browse/FLUME-3356>`__] - Probable security issue in Flume 
+    * [`FLUME-3360 <https://issues.apache.org/jira/browse/FLUME-3360>`__] - Maven assemble failed on macOS 
+    * [`FLUME-3395 <https://issues.apache.org/jira/browse/FLUME-3395>`__] - Fix for CVE-2021-44228 
+    * [`FLUME-3407 <https://issues.apache.org/jira/browse/FLUME-3407>`__] - workaround for jackson-mapper-asl-1.9.13.jar  @ flume-ng 
+    * [`FLUME-3409 <https://issues.apache.org/jira/browse/FLUME-3409>`__] - upgrade httpclient due to cve 
+    * [`FLUME-3416 <https://issues.apache.org/jira/browse/FLUME-3416>`__] - Improve input validation 
+    * [`FLUME-3421 <https://issues.apache.org/jira/browse/FLUME-3421>`__] - Default log4j settings do not log to console after FLUME-2050 
+    * [`FLUME-3426 <https://issues.apache.org/jira/browse/FLUME-3426>`__] - Unresolved Security Issues 
+
+** New Feature
+    * [`FLUME-3412 <https://issues.apache.org/jira/browse/FLUME-3412>`__] - Add LoadBalancingChannelSelector 
+
+** Improvement
+    * [`FLUME-199 <https://issues.apache.org/jira/browse/FLUME-199>`__] - Unit tests should hunt for available ports if defaults are in use 
+    * [`FLUME-2050 <https://issues.apache.org/jira/browse/FLUME-2050>`__] - Upgrade to log4j2 (when GA) 
+    * [`FLUME-3045 <https://issues.apache.org/jira/browse/FLUME-3045>`__] - Document GitHub Pull Requests in How to Contribute Guide 
+    * [`FLUME-3335 <https://issues.apache.org/jira/browse/FLUME-3335>`__] - Support configuration and reconfiguration via HTTP(S) 
+    * [`FLUME-3338 <https://issues.apache.org/jira/browse/FLUME-3338>`__] - Doc Flume Recoverability with Kafka 
+    * [`FLUME-3363 <https://issues.apache.org/jira/browse/FLUME-3363>`__] - CVE-2019-20445 
+    * [`FLUME-3368 <https://issues.apache.org/jira/browse/FLUME-3368>`__] - Update Jackson to 2.9.10 
+    * [`FLUME-3389 <https://issues.apache.org/jira/browse/FLUME-3389>`__] - Build and test Apache Flume on ARM64 CPU architecture 
+    * [`FLUME-3397 <https://issues.apache.org/jira/browse/FLUME-3397>`__] - Upgrade Log4j to 2.17.1 and SLF4J to 1.7.32 
+    * [`FLUME-3398 <https://issues.apache.org/jira/browse/FLUME-3398>`__] - Upgrade Kafka to a supported version. 
+    * [`FLUME-3399 <https://issues.apache.org/jira/browse/FLUME-3399>`__] - Update Jackson to 2.13.1 
+    * [`FLUME-3403 <https://issues.apache.org/jira/browse/FLUME-3403>`__] - The parquet-avro version used by flume is 1.4.1, which is vulnerable.
+    * [`FLUME-3405 <https://issues.apache.org/jira/browse/FLUME-3405>`__] - Reopened - The parquet-avro version used by flume is 1.4.1, which is vulnerable.
+    * [`FLUME-3413 <https://issues.apache.org/jira/browse/FLUME-3413>`__] - Add "initialization" phase to components. 
+
+** Wish
+    * [`FLUME-3400 <https://issues.apache.org/jira/browse/FLUME-3400>`__] - Upgrade commons-io to 2.11.0 
+
+** Task
+    * [`FLUME-3401 <https://issues.apache.org/jira/browse/FLUME-3401>`__] - Remove Kite Dataset Sink 
+    * [`FLUME-3402 <https://issues.apache.org/jira/browse/FLUME-3402>`__] - remove org.codehaus.jackson dependencies 
+    * [`FLUME-3404 <https://issues.apache.org/jira/browse/FLUME-3404>`__] - Update Commons CLI to 1.5.0, Commons Codec to 1.15, Commons Compress to 1.21 and Commons Lang to 2.6 
+    * [`FLUME-3410 <https://issues.apache.org/jira/browse/FLUME-3410>`__] - upgrade hbase version 
+    * [`FLUME-3411 <https://issues.apache.org/jira/browse/FLUME-3411>`__] - upgrade hive sink to 1.2.2 
+    * [`FLUME-3417 <https://issues.apache.org/jira/browse/FLUME-3417>`__] - Remove Elasticsearch sink that requires Elasticsearch 0.90.1 
+    * [`FLUME-3419 <https://issues.apache.org/jira/browse/FLUME-3419>`__] - Review project LICENSE and NOTICE 
+    * [`FLUME-3424 <https://issues.apache.org/jira/browse/FLUME-3424>`__] - Upgrade Twitter4j to version 4.0.7+ 
+
+** Dependency upgrade
+    * [`FLUME-3339 <https://issues.apache.org/jira/browse/FLUME-3339>`__] - Remove Xerces and Xalan dependencies 
+    * [`FLUME-3385 <https://issues.apache.org/jira/browse/FLUME-3385>`__] - flume-ng-sdk uses Avro-IPC version with vulnerable version of Jetty 
+    * [`FLUME-3386 <https://issues.apache.org/jira/browse/FLUME-3386>`__] - flume-ng-sdk uses vulnerable version of netty 
diff --git a/content/_sources/releases/index.txt b/content/_sources/releases/index.txt
index e9c353e..7906568 100644
--- a/content/_sources/releases/index.txt
+++ b/content/_sources/releases/index.txt
@@ -3,13 +3,13 @@ Releases
 
 .. rubric:: Current Release
 
-The current stable release is `Apache Flume Version 1.9.0 <1.9.0.html>`__.
+The current stable release is `Apache Flume Version 1.10.0 <1.10.0.html>`__.
 
 .. toctree::
    :maxdepth: 1
    :hidden:
 
-   1.9.0
+   1.10.0
 
 .. rubric:: Previous Releases
 
@@ -17,6 +17,7 @@ The current stable release is `Apache Flume Version 1.9.0 <1.9.0.html>`__.
    :maxdepth: 1
    :glob:
 
+   1.9.0
    1.8.0
    1.7.0
    1.6.0
diff --git a/content/_sources/team.txt b/content/_sources/team.txt
index 6e5405b..b1a4c63 100644
--- a/content/_sources/team.txt
+++ b/content/_sources/team.txt
@@ -10,39 +10,47 @@ Team
  to the Members. The number of Contributors to the project is unbounded. Get involved today. All contributions
  to the project are greatly appreciated.
 
- The following individuals are recognized as PMC Members or Project Committers.
+ The following individuals are recognized as currently active PMC Members or Project Committers.
 
 .. csv-table::
    :header: "Name", "Email", "Id", "Organization", "Role"
    :widths: 30, 25, 15, 15, 15
 
-   "Aaron Kimball", "kimballa@apache.org", "kimballa", "Zymergen", "PMC Member"
-   "Ashish Paliwal", "apaliwal@apache.org", "apaliwal", "Apple", "Committer"
-   "Andrew Bayer", "abayer@apache.org", "abayer", "CloudBees", "PMC Member"
-   "Ahmed Radwan", "ahmed@apache.org", "ahmed", "Apple", "PMC Member"
-   "Arvind Prabhakar", "arvind@apache.org", "arvind", "StreamSets", "PMC Member"
-   "Balázs Donát Bessenyei", "bessbd@apache.org", "bessbd", "Ericsson", "PMC Member"
-   "Brock Noland", "brock@apache.org", "brock", "phData", "PMC Member"
-   "Bruce Mitchener", "brucem@apache.org", "brucem", "Data Fueled", "PMC Member"
-   "Derek Deeter", "ddeeter@apache.org", "ddeeter", "Vanderbilt University", "PMC Member"
-   "Denes Arvay", "denes@apache.org", "denes", "Cloudera", "PMC Member"
-   "Eric Sammer", "esammer@apache.org", "esammer", "Splunk", "PMC Member"
-   "Hari Shreedharan", "hshreedharan@apache.org", "hshreedharan", "StreamSets", "PMC Member"
-   "Henry Robinson", "henry@apache.org", "henry", "Cloudera", "PMC Member"
-   "Jaroslav Cecho", "jarcec@apache.org", "jarcec", "StreamSets", "PMC Member"
-   "Johny Rufus", "johnyrufus@apache.org", "johnyrufus", "Microsoft", "Committer"
-   "Jonathan Hsieh", "jmhsieh@apache.org", "jmhsieh", "Cloudera", "PMC Member"
-   "Juhani Connolly", "juhanic@apache.org", "juhanic", "CyberAgent", "PMC Member"
-   "Mike Percy", "mpercy@apache.org", "mpercy", "Cloudera", "PMC Member"
-   "Mingjie Lai", "mlai@apache.org", "mlai", "Apple", "PMC Member"
-   "Mubarak Seyed", "mubarak@apache.org","mubarak", "Apple", "Committer"
-   "Nick Verbeck", "nerdynick@apache.org", "nerdynick", "", "PMC Member"
-   "Patrick Hunt", "phunt@apache.org", "phunt", "Cloudera", "PMC Member"
-   "Patrick Wendell", "pwendell@apache.org", "pwendell", "Databricks", "Committer"
-   "Prasad Mujumdar", "prasadm@apache.org", "prasadm", "BlueTalon", "PMC Member"
-   "Ralph Goers", "rgoers@apache.org", "rgoers", "Nextiva", "PMC Member"
-   "Roshan Naik", "roshannaik@apache.org", "roshannaik", "Hortonworks", "PMC Member"
-   "Attila Simon", "sati@apache.org", "sati", "RapidMiner", "Committer"
-   "Ferenc Szabo", "szaboferee@apache.org", "szaboferee", "Cloudera", "Committer"
-   "Wolfgang Hoschek", "whoschek@apache.org", "whoschek", "Cloudera", "Committer"
-   "Will McQueen", "will@apache.org", "will", "", "PMC Member"
+   "Arvind Prabhakar", "arvind at apache.org", "arvind", "StreamSets", "PMC Member"
+   "Balázs Donát Bessenyei", "bessbd at apache.org", "bessbd", "Ericsson", "PMC Chair"
+   "Denes Arvay", "denes at apache.org", "denes", "Cloudera", "PMC Member"
+   "Jaroslav Cecho", "jarcec at apache.org", "jarcec", "StreamSets", "PMC Member"
+   "Jonathan Hsieh", "jmhsieh at apache.org", "jmhsieh", "Cloudera", "PMC Member"
+   "Juhani Connolly", "juhanic at apache.org", "juhanic", "CyberAgent", "PMC Member"
+   "Mike Percy", "mpercy at apache.org", "mpercy", "Cloudera", "PMC Member"
+   "Ahmed Radwan", "ahmed at apache.org", "ahmed", "Apple", "PMC Member"
+   "Ralph Goers", "rgoers at apache.org", "rgoers", "Nextiva", "PMC Member"
+   "Tristan Stevens", "tristan at apache.org", "tristan", "Cloudera", "PMC Member"
+
+The following individuals are recognized as former PMC Members or Project Committers.
+
+.. csv-table::
+   :header: "Name", "Email", "Id", "Organization", "Role", "Status"
+   :widths: 25, 25, 10, 10, 15, 15
+
+   "Aaron Kimball", "kimballa at apache.org", "kimballa", "Zymergen", "PMC Member", "Last active 2011"
+   "Ashish Paliwal", "apaliwal at apache.org", "apaliwal", "Apple", "Committer", "Last active 2017"
+   "Andrew Bayer", "abayer at apache.org", "abayer", "CloudBees", "PMC Member", "Last active 2015"
+   "Brock Noland", "brock at apache.org", "brock", "phData", "PMC Member", "Last active 2019"
+   "Bruce Mitchener", "brucem at apache.org", "brucem", "Data Fueled", "PMC Member", "Last active - project creation"
+   "Derek Deeter", "ddeeter at apache.org", "ddeeter", "Vanderbilt University", "PMC Member", "Last active - project creation"
+   "Eric Sammer", "esammer at apache.org", "esammer", "Splunk", "PMC Member", "Last active 2017"
+   "Hari Shreedharan", "hshreedharan at apache.org", "hshreedharan", "StreamSets", "PMC Member", "Emeritus 2022"
+   "Henry Robinson", "henry at apache.org", "henry", "Cloudera", "PMC Member", "Last active - project creation"
+   "Johny Rufus", "johnyrufus at apache.org", "johnyrufus", "Microsoft", "Committer", "Last active 2017"
+   "Mingjie Lai", "mlai at apache.org", "mlai", "Apple", "PMC Member", "Last active 2012"
+   "Mubarak Seyed", "mubarak at apache.org","mubarak", "Apple", "Committer", "Last active 2017"
+   "Nick Verbeck", "nerdynick at apache.org", "nerdynick", "", "PMC Member", "Last active 2011"
+   "Patrick Hunt", "phunt at apache.org", "phunt", "Cloudera", "PMC Member", "Last active 2012"
+   "Patrick Wendell", "pwendell at apache.org", "pwendell", "Databricks", "Committer", "Last active 2015"
+   "Prasad Mujumdar", "prasadm at apache.org", "prasadm", "BlueTalon", "PMC Member", "Last active 2015"
+   "Roshan Naik", "roshannaik at apache.org", "roshannaik", "Hortonworks", "PMC Member", "Last active 2017"
+   "Attila Simon", "sati at apache.org", "sati", "RapidMiner", "Committer", "Last active 2017"
+   "Ferenc Szabo", "szaboferee at apache.org", "szaboferee", "Cloudera", "Committer", "Last active 2019"
+   "Wolfgang Hoschek", "whoschek at apache.org", "whoschek", "Cloudera", "Committer", "Last active 2016"
+   "Will McQueen", "will at apache.org", "will", "", "PMC Member", "Last active 2017"
diff --git a/content/documentation.html b/content/documentation.html
index a061329..9442eee 100644
--- a/content/documentation.html
+++ b/content/documentation.html
@@ -25,7 +25,7 @@
     <script type="text/javascript" src="_static/underscore.js"></script>
     <script type="text/javascript" src="_static/doctools.js"></script>
     <link rel="top" title="Apache Flume" href="index.html" />
-    <link rel="next" title="Flume 1.9.0 User Guide" href="FlumeUserGuide.html" />
+    <link rel="next" title="Flume 1.10.0 User Guide" href="FlumeUserGuide.html" />
     <link rel="prev" title="Download" href="download.html" /> 
   </head>
   <body>
@@ -120,7 +120,7 @@ been released.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/download.html b/content/download.html
index 552d5ba..ed42927 100644
--- a/content/download.html
+++ b/content/download.html
@@ -74,14 +74,14 @@ originals on the main distribution server.</p>
 </colgroup>
 <tbody valign="top">
 <tr class="row-odd"><td>Apache Flume binary (tar.gz)</td>
-<td><a class="reference external" href="http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz">apache-flume-1.9.0-bin.tar.gz</a></td>
-<td><a class="reference external" href="http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.sha512">apache-flume-1.9.0-bin.tar.gz.sha512</a></td>
-<td><a class="reference external" href="http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.asc">apache-flume-1.9.0-bin.tar.gz.asc</a></td>
+<td><a class="reference external" href="http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz">apache-flume-1.10.0-bin.tar.gz</a></td>
+<td><a class="reference external" href="http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.sha512">apache-flume-1.10.0-bin.tar.gz.sha512</a></td>
+<td><a class="reference external" href="http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.asc">apache-flume-1.10.0-bin.tar.gz.asc</a></td>
 </tr>
 <tr class="row-even"><td>Apache Flume source (tar.gz)</td>
-<td><a class="reference external" href="http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-src.tar.gz">apache-flume-1.9.0-src.tar.gz</a></td>
-<td><a class="reference external" href="http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.sha512">apache-flume-1.9.0-src.tar.gz.sha512</a></td>
-<td><a class="reference external" href="http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.asc">apache-flume-1.9.0-src.tar.gz.asc</a></td>
+<td><a class="reference external" href="http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-src.tar.gz">apache-flume-1.10.0-src.tar.gz</a></td>
+<td><a class="reference external" href="http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.sha512">apache-flume-1.10.0-src.tar.gz.sha512</a></td>
+<td><a class="reference external" href="http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.asc">apache-flume-1.10.0-src.tar.gz.asc</a></td>
 </tr>
 </tbody>
 </table>
@@ -93,10 +93,10 @@ as well as the asc signature file for the relevant distribution. Make sure you g
 <a class="reference external" href="http://www.apache.org/dist/flume/">main distribution directory</a> rather than from a mirror.
 Then verify the signatures using:</p>
 <div class="highlight-none"><div class="highlight"><pre>% gpg --import KEYS
-% gpg --verify apache-flume-1.9.0-src.tar.gz.asc
+% gpg --verify apache-flume-1.10.0-src.tar.gz.asc
 </pre></div>
 </div>
-<p>Apache Flume 1.9.0 is signed by Ferenc Szabo 79E8E648</p>
+<p>Apache Flume 1.10.0 is signed by Ralph Goers B3D8E1BA</p>
 <p>Alternatively, you can verify the MD5 or SHA1 signatures of the files. A program called md5, md5sum, or shasum is included in many
 Unix distributions for this purpose.</p>
 <p class="rubric">Previous_Releases</p>
@@ -146,7 +146,7 @@ Unix distributions for this purpose.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/getinvolved.html b/content/getinvolved.html
index 65a364c..ed83457 100644
--- a/content/getinvolved.html
+++ b/content/getinvolved.html
@@ -115,7 +115,7 @@ you find at: <a class="reference external" href="https://issues.apache.org/jira/
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/index.html b/content/index.html
index 549dfe3..4a9dc60 100644
--- a/content/index.html
+++ b/content/index.html
@@ -71,6 +71,22 @@ application.</p>
 <img alt="Agent component diagram" src="_images/DevGuide_image00.png" />
 </div>
 <p class="rubric">News</p>
+<h3>June 5, 2022 - Apache Flume 1.10.0 Released</h3><p>The Apache Flume team is pleased to announce the release of Flume 1.10.0.</p>
+<p>Flume is a distributed, reliable, and available service for efficiently
+collecting, aggregating, and moving large amounts of streaming event data.</p>
+<p>Flume 1.10.0 is stable, production-ready software, and is backwards-compatible with
+previous versions of the Flume 1.x codeline.</p>
+<p>This version of Flume upgrades many dependencies, resolving the CVEs associated with them.
+Enhancements included in this release include the addition of a LoadBalancingChannelSelector,
+the ability to retrieve the Flume configuration from a remote source such as a Spring
+Cloud Config Server, and support for composite configurations.</p>
+<p>Flume has been updated to use Log4j 2.x instead of Log4j 1.x.</p>
+<p>The full change log and documentation are available on the
+<a class="reference external" href="releases/1.10.0.html">Flume 1.10.0 release page</a>.</p>
+<p>This release can be downloaded from the Flume <a class="reference external" href="download.html">Download</a> page.</p>
+<p>Your contributions, feedback, help and support make Flume better!
+For more information on how to report problems or contribute,
+please visit our <a class="reference external" href="getinvolved.html">Get Involved</a> page.</p>
 <h3>January 8, 2019 - Apache Flume 1.9.0 Released</h3><p>The Apache Flume team is pleased to announce the release of Flume 1.9.0.</p>
 <p>Flume is a distributed, reliable, and available service for efficiently
 collecting, aggregating, and moving large amounts of streaming event data.</p>
@@ -511,7 +527,7 @@ Feel free to post to the User&#8217;s mailing list with any questions.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/license.html b/content/license.html
index 84da257..3ef596a 100644
--- a/content/license.html
+++ b/content/license.html
@@ -198,7 +198,7 @@ accepting any such warranty or additional liability.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/mailinglists.html b/content/mailinglists.html
index c56d266..c28fa1f 100644
--- a/content/mailinglists.html
+++ b/content/mailinglists.html
@@ -144,7 +144,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.0.0.html b/content/releases/1.0.0.html
index 5227703..2b429f8 100644
--- a/content/releases/1.0.0.html
+++ b/content/releases/1.0.0.html
@@ -313,7 +313,7 @@ Incubator, and is labelled and should be considered as a beta version.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.1.0.html b/content/releases/1.1.0.html
index 6697115..07d97f9 100644
--- a/content/releases/1.1.0.html
+++ b/content/releases/1.1.0.html
@@ -199,7 +199,7 @@ it is likely to change until a stable release version.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.8.0.html b/content/releases/1.10.0.html
similarity index 51%
copy from content/releases/1.8.0.html
copy to content/releases/1.10.0.html
index 76822f3..7074cb1 100644
--- a/content/releases/1.8.0.html
+++ b/content/releases/1.10.0.html
@@ -7,7 +7,7 @@
   <head>
     <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
     
-    <title>Version 1.8.0 &mdash; Apache Flume</title>
+    <title>Version 1.10.0 &mdash; Apache Flume</title>
     
     <link rel="stylesheet" href="../_static/flume.css" type="text/css" />
     <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
@@ -26,8 +26,8 @@
     <script type="text/javascript" src="../_static/doctools.js"></script>
     <link rel="top" title="Apache Flume" href="../index.html" />
     <link rel="up" title="Releases" href="index.html" />
-    <link rel="next" title="Version 1.7.0" href="1.7.0.html" />
-    <link rel="prev" title="Version 1.9.0" href="1.9.0.html" /> 
+    <link rel="next" title="Version 1.9.0" href="1.9.0.html" />
+    <link rel="prev" title="Releases" href="index.html" /> 
   </head>
   <body>
 <div class="header">
@@ -60,96 +60,81 @@
         <div class="bodywrapper">
           <div class="body">
             
-  <div class="section" id="version-1-8-0">
-<h1>Version 1.8.0<a class="headerlink" href="#version-1-8-0" title="Permalink to this headline">¶</a></h1>
+  <div class="section" id="version-1-10-0">
+<h1>Version 1.10.0<a class="headerlink" href="#version-1-10-0" title="Permalink to this headline">¶</a></h1>
 <p class="rubric">Status of this release</p>
-<p>Apache Flume 1.8.0 is the eleventh release of Flume as an Apache top-level project
-(TLP). Apache Flume 1.8.0 is production-ready software.</p>
+<p>Apache Flume 1.10.0 is the twelfth release of Flume as an Apache top-level project
+(TLP). Apache Flume 1.10.0 is production-ready software.</p>
 <p class="rubric">Release Documentation</p>
 <ul class="simple">
-<li><a class="reference external" href="content/1.8.0/FlumeUserGuide.html">Flume 1.8.0 User Guide</a> (also in <a class="reference external" href="content/1.8.0/FlumeUserGuide.pdf">pdf</a>)</li>
-<li><a class="reference external" href="content/1.8.0/FlumeDeveloperGuide.html">Flume 1.8.0 Developer Guide</a> (also in <a class="reference external" href="content/1.8.0/FlumeDeveloperGuide.pdf">pdf</a>)</li>
-<li><a class="reference external" href="content/1.8.0/apidocs/index.html">Flume 1.8.0 API Documentation</a></li>
+<li><a class="reference external" href="content/1.10.0/FlumeUserGuide.html">Flume 1.10.0 User Guide</a> (also in <a class="reference external" href="content/1.10.0/FlumeUserGuide.pdf">pdf</a>)</li>
+<li><a class="reference external" href="content/1.10.0/FlumeDeveloperGuide.html">Flume 1.10.0 Developer Guide</a> (also in <a class="reference external" href="content/1.10.0/FlumeDeveloperGuide.pdf">pdf</a>)</li>
+<li><a class="reference external" href="content/1.10.0/apidocs/index.html">Flume 1.10.0 API Documentation</a></li>
 </ul>
 <p class="rubric">Changes</p>
-<p>Release Notes - Flume - Version v1.8.0</p>
+<p>Release Notes - Flume - Version v1.10.0</p>
 <dl class="docutils">
-<dt>** New Feature</dt>
-<dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2171">FLUME-2171</a>] - Add Interceptor to remove headers from event</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2917">FLUME-2917</a>] - Provide netcat UDP source as alternative to TCP</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2993">FLUME-2993</a>] - Support environment variables in configuration files</li>
-</ul>
-</dd>
-<dt>** Improvement</dt>
-<dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-1520">FLUME-1520</a>] - Timestamp interceptor should support custom headers</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2945">FLUME-2945</a>] - Bump java target version to 1.8</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3020">FLUME-3020</a>] - Improve HDFSEventSink Escape Ingestion by more then 10x by not getting InetAddress on every record</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3025">FLUME-3025</a>] - Expose FileChannel.open on JMX</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3072">FLUME-3072</a>] - Add IP address to headers in flume log4j appender</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3092">FLUME-3092</a>] - Extend the FileChannel&#8217;s monitoring metrics</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3100">FLUME-3100</a>] - Support arbitrary header substitution for topic of Kafka</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3144">FLUME-3144</a>] - Improve Log4jAppender&#8217;s performance by allowing logging collection of messages</li>
-</ul>
-</dd>
 <dt>** Bug</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2620">FLUME-2620</a>] - File channel throws NullPointerException if a header value is null</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2752">FLUME-2752</a>] - Flume AvroSource will leak the memory and the OOM will be happened.</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2812">FLUME-2812</a>] - Exception in thread &#8220;SinkRunner-PollingRunner-DefaultSinkProcessor&#8221; java.lang.Error: Maximum permit count exceeded</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2857">FLUME-2857</a>] - Kafka Source/Channel/Sink does not restore default values when live update config</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2905">FLUME-2905</a>] - NetcatSource - Socket not closed when an exception is encountered during start() leading to file descriptor leaks</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2991">FLUME-2991</a>] - ExecSource command execution starts before starting the sourceCounter</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3027">FLUME-3027</a>] - Kafka Channel should clear offsets map after commit</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3031">FLUME-3031</a>] - sequence source should reset its counter for event body on channel exception</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3043">FLUME-3043</a>] - KafkaSink SinkCallback throws NullPointerException when Log4J level is debug</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3046">FLUME-3046</a>] - Kafka Sink and Source Configuration Improvements</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3049">FLUME-3049</a>] - Wrapping the exception into SecurityException in UGIExecutor.execute hides the original one</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3057">FLUME-3057</a>] - Build fails due to unsupported snappy-java version on ppc64le</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3080">FLUME-3080</a>] - Close failure in HDFS Sink might cause data loss</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3083">FLUME-3083</a>] - Taildir source can miss events if file updated in same second as file close</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3085">FLUME-3085</a>] - HDFS Sink can skip flushing some BucketWriters, might lead to data loss</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3112">FLUME-3112</a>] - Upgrade jackson-core library dependency</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3127">FLUME-3127</a>] - Upgrade libfb303 library dependency</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3131">FLUME-3131</a>] - Upgrade spring framework library dependencies</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3132">FLUME-3132</a>] - Upgrade tomcat jasper library dependencies</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3135">FLUME-3135</a>] - property logger in org.apache.flume.interceptor.RegexFilteringInterceptor confused</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3141">FLUME-3141</a>] - Small typo found in RegexHbaseEventSerializer.java</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3152">FLUME-3152</a>] - Add Flume Metric for Backup Checkpoint Errors</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3155">FLUME-3155</a>] - Use batch mode in mvn to fix Travis CI error</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3157">FLUME-3157</a>] - Refactor TestHDFSEventSinkOnMiniCluster to not use LeaseManager private API</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3173">FLUME-3173</a>] - Upgrade joda-time</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3174">FLUME-3174</a>] - HdfsSink AWS S3A authentication does not work on JDK 8</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3175">FLUME-3175</a>] - Javadoc generation fails due to Java8&#8217;s strict doclint</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3151">FLUME-3151</a>] - Upgrade Hadoop to 2.10.1</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3311">FLUME-3311</a>] - Update Wrong Use In HDFS Sink</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3316">FLUME-3316</a>] - Syslog Rfc3164Date test fails when the test date falls on a leap day</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3328">FLUME-3328</a>] - Fix Deprecated Properties table of HDFS Sink</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3356">FLUME-3356</a>] - Probable security issue in Flume</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3360">FLUME-3360</a>] - Maven assemble failed on macOS</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3395">FLUME-3395</a>] - Fix for CVE-2021-44228</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3407">FLUME-3407</a>] - workaround for jackson-mapper-asl-1.9.13.jar  &#64; flume-ng</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3409">FLUME-3409</a>] - upgrade httpclient due to cve</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3416">FLUME-3416</a>] - Improve input validation</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3421">FLUME-3421</a>] - Default log4j settings do not log to console after FLUME-2050</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3426">FLUME-3426</a>] - Unresolved Security Issues</li>
 </ul>
 </dd>
-<dt>** Documentation</dt>
+<dt>** New Feature</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2175">FLUME-2175</a>] - Update Developer Guide with notes on how to upgrade Protocol Buffer version</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2817">FLUME-2817</a>] - Sink for multi-agent flow example in user guide is set up incorrectly</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3412">FLUME-3412</a>] - Add LoadBalancingChannelSelector</li>
 </ul>
 </dd>
-<dt>** Wish</dt>
+<dt>** Improvement</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2579">FLUME-2579</a>] - JMS source support durable subscriptions and message listening</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-199">FLUME-199</a>] - Unit tests should hunt for available ports if defaults are in use</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2050">FLUME-2050</a>] - Upgrade to log4j2 (when GA)</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3045">FLUME-3045</a>] - Document GitHub Pull Requests in How to Contribute Guide</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3335">FLUME-3335</a>] - Support configuration and reconfiguration via HTTP(S)</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3338">FLUME-3338</a>] - Doc Flume Recoverability with Kafka</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3363">FLUME-3363</a>] - CVE-2019-20445</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3368">FLUME-3368</a>] - Update Jackson to 2.9.10</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3389">FLUME-3389</a>] - Build and test Apache Flume on ARM64 CPU architecture</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3397">FLUME-3397</a>] - Upgrade Log4 to 2.17.1 and SLF4J to 1.7.32</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3398">FLUME-3398</a>] - Upgrade Kafka to a supported version.</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3399">FLUME-3399</a>] - Update Jackson to 2.13.1</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3403">FLUME-3403</a>] - The parquet-avro version used by flume is 1.4.1, which is vulnerable.</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3405">FLUME-3405</a>] - Reopened - The parquet-avro version used by flume is 1.4.1, which is vulnerable.</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3413">FLUME-3413</a>] - Add &#8220;initialization&#8221; phase to components.</li>
 </ul>
 </dd>
-<dt>** Question</dt>
+<dt>** Wish</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2427">FLUME-2427</a>] - java.lang.NoSuchMethodException and warning on HDFS (S3) sink</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3400">FLUME-3400</a>] - Upgrade commons-io to 2.11.0</li>
 </ul>
 </dd>
 <dt>** Task</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3093">FLUME-3093</a>] - Groundwork for version changes in root pom</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3154">FLUME-3154</a>] - Add HBase client version check to AsyncHBaseSink and HBaseSink</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3401">FLUME-3401</a>] - Remove Kite Dataset Sink</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3402">FLUME-3402</a>] - remove org.codehaus.jackson dependencies</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3404">FLUME-3404</a>] - Update Commons CLI to 1.5.0, Commons Codec to 1.15, Commons Compress to 1.21 and Commons Lang to 2.6</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3410">FLUME-3410</a>] - upgrade hbase version</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3411">FLUME-3411</a>] - upgrade hive sink to 1.2.2</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3417">FLUME-3417</a>] - Remove Elasticsearch sink that requires Elasticsearch 0.90.1</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3419">FLUME-3419</a>] - Review project LICENSE and NOTICE</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3424">FLUME-3424</a>] - Upgrade Twitter4j to version 4.0.7+</li>
 </ul>
 </dd>
-<dt>** Test</dt>
+<dt>** Dependency upgrade</dt>
 <dd><ul class="first last simple">
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-2997">FLUME-2997</a>] - Fix flaky junit test in SpillableMemoryChannel</li>
-<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3002">FLUME-3002</a>] - Some tests in TestBucketWriter are flaky</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3339">FLUME-3339</a>] - Remove Xerces and Xalan dependencies</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3385">FLUME-3385</a>] - flume-ng-sdk uses Avro-IPC version with vulnerable version of Jetty</li>
+<li>[<a class="reference external" href="https://issues.apache.org/jira/browse/FLUME-3386">FLUME-3386</a>] - flume-ng-sdk uses vulnerable version of netty</li>
 </ul>
 </dd>
 </dl>
@@ -198,7 +183,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.2.0.html b/content/releases/1.2.0.html
index b2065e7..416c023 100644
--- a/content/releases/1.2.0.html
+++ b/content/releases/1.2.0.html
@@ -320,7 +320,7 @@ and functionality along with bug fixes and other enhancements.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.3.0.html b/content/releases/1.3.0.html
index 438c976..6ba4314 100644
--- a/content/releases/1.3.0.html
+++ b/content/releases/1.3.0.html
@@ -305,7 +305,7 @@ enhancements.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.3.1.html b/content/releases/1.3.1.html
index 04eede7..68c1c02 100644
--- a/content/releases/1.3.1.html
+++ b/content/releases/1.3.1.html
@@ -142,7 +142,7 @@ several bug fixes and performance enhancements.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.4.0.html b/content/releases/1.4.0.html
index 144fa71..3a17077 100644
--- a/content/releases/1.4.0.html
+++ b/content/releases/1.4.0.html
@@ -327,7 +327,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.5.0.1.html b/content/releases/1.5.0.1.html
index 01afd23..1eca71d 100644
--- a/content/releases/1.5.0.1.html
+++ b/content/releases/1.5.0.1.html
@@ -127,7 +127,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.5.0.html b/content/releases/1.5.0.html
index 636e13b..0d77fc1 100644
--- a/content/releases/1.5.0.html
+++ b/content/releases/1.5.0.html
@@ -259,7 +259,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.5.2.html b/content/releases/1.5.2.html
index 55da54c..ac58b4c 100644
--- a/content/releases/1.5.2.html
+++ b/content/releases/1.5.2.html
@@ -132,7 +132,7 @@ sources and sinks.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.6.0.html b/content/releases/1.6.0.html
index 8eca640..6f62c26 100644
--- a/content/releases/1.6.0.html
+++ b/content/releases/1.6.0.html
@@ -257,7 +257,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.7.0.html b/content/releases/1.7.0.html
index e2b936b..fbfd4ee 100644
--- a/content/releases/1.7.0.html
+++ b/content/releases/1.7.0.html
@@ -245,7 +245,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.8.0.html b/content/releases/1.8.0.html
index 76822f3..ecbf570 100644
--- a/content/releases/1.8.0.html
+++ b/content/releases/1.8.0.html
@@ -198,7 +198,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/1.9.0.html b/content/releases/1.9.0.html
index 821872f..b52e5df 100644
--- a/content/releases/1.9.0.html
+++ b/content/releases/1.9.0.html
@@ -27,7 +27,7 @@
     <link rel="top" title="Apache Flume" href="../index.html" />
     <link rel="up" title="Releases" href="index.html" />
     <link rel="next" title="Version 1.8.0" href="1.8.0.html" />
-    <link rel="prev" title="Releases" href="index.html" /> 
+    <link rel="prev" title="Version 1.10.0" href="1.10.0.html" /> 
   </head>
   <body>
 <div class="header">
@@ -216,7 +216,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/releases/index.html b/content/releases/index.html
index 2078dbb..cbc1ccb 100644
--- a/content/releases/index.html
+++ b/content/releases/index.html
@@ -25,8 +25,8 @@
     <script type="text/javascript" src="../_static/underscore.js"></script>
     <script type="text/javascript" src="../_static/doctools.js"></script>
     <link rel="top" title="Apache Flume" href="../index.html" />
-    <link rel="next" title="Version 1.9.0" href="1.9.0.html" />
-    <link rel="prev" title="Flume 1.9.0 Developer Guide" href="../FlumeDeveloperGuide.html" /> 
+    <link rel="next" title="Version 1.10.0" href="1.10.0.html" />
+    <link rel="prev" title="Flume 1.10.0 Developer Guide" href="../FlumeDeveloperGuide.html" /> 
   </head>
   <body>
 <div class="header">
@@ -62,12 +62,13 @@
   <div class="section" id="releases">
 <h1>Releases<a class="headerlink" href="#releases" title="Permalink to this headline">¶</a></h1>
 <p class="rubric">Current Release</p>
-<p>The current stable release is <a class="reference external" href="1.9.0.html">Apache Flume Version 1.9.0</a>.</p>
+<p>The current stable release is <a class="reference external" href="1.10.0.html">Apache Flume Version 1.10.0</a>.</p>
 <div class="toctree-wrapper compound">
 </div>
 <p class="rubric">Previous Releases</p>
 <div class="toctree-wrapper compound">
 <ul>
+<li class="toctree-l1"><a class="reference internal" href="1.9.0.html">Version 1.9.0</a></li>
 <li class="toctree-l1"><a class="reference internal" href="1.8.0.html">Version 1.8.0</a></li>
 <li class="toctree-l1"><a class="reference internal" href="1.7.0.html">Version 1.7.0</a></li>
 <li class="toctree-l1"><a class="reference internal" href="1.6.0.html">Version 1.6.0</a></li>
@@ -127,7 +128,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/search.html b/content/search.html
index 1ed327f..5fc765b 100644
--- a/content/search.html
+++ b/content/search.html
@@ -129,7 +129,7 @@
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/searchindex.js b/content/searchindex.js
index f6a1a07..31e04dc 100644
--- a/content/searchindex.js
+++ b/content/searchindex.js
@@ -1 +1 @@
-Search.setIndex({objtypes:{},objects:{},titles:["Version 1.0.0 - Incubating","Version 1.1.0 - Incubating","Version 1.3.1","Version 1.4.0","Version 1.5.0","Version 1.2.0","Version 1.3.0","Flume 1.9.0 Developer Guide","Version 1.8.0","Version 1.9.0","Version 1.5.0.1","Version 1.6.0","Version 1.5.2","Version 1.7.0","Source Repository","Download","Mailing lists","Flume 1.9.0 User Guide","Documentation","Testing","Welcome to Apache Flume","Releases","Team","How to Get Involved","Apache Licens [...]
\ No newline at end of file
+Search.setIndex({objtypes:{},objects:{},titles:["Version 1.0.0 - Incubating","Version 1.10.0","Version 1.1.0 - Incubating","Version 1.3.1","Version 1.4.0","Version 1.5.0","Version 1.2.0","Version 1.3.0","Version 1.8.0","Flume 1.10.0 Developer Guide","Version 1.9.0","Version 1.5.0.1","Version 1.6.0","Source Repository","Version 1.5.2","Version 1.7.0","Mailing lists","Download","Flume 1.10.0 User Guide","Documentation","Testing","Welcome to Apache Flume","Releases","Team","How to Get Invol [...]
\ No newline at end of file
diff --git a/content/source.html b/content/source.html
index 35930ef..65be283 100644
--- a/content/source.html
+++ b/content/source.html
@@ -173,7 +173,7 @@ the source code.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/team.html b/content/team.html
index e09146c..cc35ca6 100644
--- a/content/team.html
+++ b/content/team.html
@@ -69,7 +69,7 @@ decisions, voting on releases as well as acting as Committers. Committers have d
 actively evolve the codebase. Contributors improve the project through submission of patches and suggestions
 to the Members. The number of Contributors to the project is unbounded. Get involved today. All contributions
 to the project are greatly appreciated.</p>
-<p>The following individuals are recognized as PMC Members or Project Committers.</p>
+<p>The following individuals are recognized as currently active PMC Members or Project Committers.</p>
 </div></blockquote>
 <table border="1" class="docutils">
 <colgroup>
@@ -88,185 +88,234 @@ to the project are greatly appreciated.</p>
 </tr>
 </thead>
 <tbody valign="top">
+<tr class="row-even"><td>Arvind Prabhakar</td>
+<td>arvind at apache.org</td>
+<td>arvind</td>
+<td>StreamSets</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-odd"><td>Balázs Donát Bessenyei</td>
+<td>bessbd at apache.org</td>
+<td>bessbd</td>
+<td>Ericsson</td>
+<td>PMC Chair</td>
+</tr>
+<tr class="row-even"><td>Denes Arvay</td>
+<td>denes at apache.org</td>
+<td>denes</td>
+<td>Cloudera</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-odd"><td>Jaroslav Cecho</td>
+<td>jarcec at apache.org</td>
+<td>jarcec</td>
+<td>StreamSets</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-even"><td>Jonathan Hsieh</td>
+<td>jmhsieh at apache.org</td>
+<td>jmhsieh</td>
+<td>Cloudera</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-odd"><td>Juhani Connolly</td>
+<td>juhanic at apache.org</td>
+<td>juhanic</td>
+<td>CyberAgent</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-even"><td>Mike Percy</td>
+<td>mpercy at apache.org</td>
+<td>mpercy</td>
+<td>Cloudera</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-odd"><td>Ahmed Radwan</td>
+<td>ahmed at apache.org</td>
+<td>ahmed</td>
+<td>Apple</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-even"><td>Ralph Goers</td>
+<td>rgoers at apache.org</td>
+<td>rgoers</td>
+<td>Nextiva</td>
+<td>PMC Member</td>
+</tr>
+<tr class="row-odd"><td>Tristan Stevens</td>
+<td>tristan at apache.org</td>
+<td>tristan</td>
+<td>Cloudera</td>
+<td>PMC Member</td>
+</tr>
+</tbody>
+</table>
+<p>The following individuals are recognized as former PMC Members or Project Committers</p>
+<table border="1" class="docutils">
+<colgroup>
+<col width="25%" />
+<col width="25%" />
+<col width="10%" />
+<col width="10%" />
+<col width="15%" />
+<col width="15%" />
+</colgroup>
+<thead valign="bottom">
+<tr class="row-odd"><th class="head">Name</th>
+<th class="head">Email</th>
+<th class="head">Id</th>
+<th class="head">Organization</th>
+<th class="head">Role</th>
+<th class="head">Status</th>
+</tr>
+</thead>
+<tbody valign="top">
 <tr class="row-even"><td>Aaron Kimball</td>
-<td><a class="reference external" href="mailto:kimballa&#37;&#52;&#48;apache&#46;org">kimballa<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>kimballa at apache.org</td>
 <td>kimballa</td>
 <td>Zymergen</td>
 <td>PMC Member</td>
+<td>Last active 2011</td>
 </tr>
 <tr class="row-odd"><td>Ashish Paliwal</td>
-<td><a class="reference external" href="mailto:apaliwal&#37;&#52;&#48;apache&#46;org">apaliwal<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>apaliwal at apache.org</td>
 <td>apaliwal</td>
 <td>Apple</td>
 <td>Committer</td>
+<td>Last active 2017</td>
 </tr>
 <tr class="row-even"><td>Andrew Bayer</td>
-<td><a class="reference external" href="mailto:abayer&#37;&#52;&#48;apache&#46;org">abayer<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>abayer at apache.org</td>
 <td>abayer</td>
 <td>CloudBees</td>
 <td>PMC Member</td>
+<td>Last active 2015</td>
 </tr>
-<tr class="row-odd"><td>Ahmed Radwan</td>
-<td><a class="reference external" href="mailto:ahmed&#37;&#52;&#48;apache&#46;org">ahmed<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>ahmed</td>
-<td>Apple</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-even"><td>Arvind Prabhakar</td>
-<td><a class="reference external" href="mailto:arvind&#37;&#52;&#48;apache&#46;org">arvind<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>arvind</td>
-<td>StreamSets</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-odd"><td>Balázs Donát Bessenyei</td>
-<td><a class="reference external" href="mailto:bessbd&#37;&#52;&#48;apache&#46;org">bessbd<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>bessbd</td>
-<td>Ericsson</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-even"><td>Brock Noland</td>
-<td><a class="reference external" href="mailto:brock&#37;&#52;&#48;apache&#46;org">brock<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-odd"><td>Brock Noland</td>
+<td>brock at apache.org</td>
 <td>brock</td>
 <td>phData</td>
 <td>PMC Member</td>
+<td>Last active 2019</td>
 </tr>
-<tr class="row-odd"><td>Bruce Mitchener</td>
-<td><a class="reference external" href="mailto:brucem&#37;&#52;&#48;apache&#46;org">brucem<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-even"><td>Bruce Mitchener</td>
+<td>brucem at apache.org</td>
 <td>brucem</td>
 <td>Data Fueled</td>
 <td>PMC Member</td>
+<td>Last active - project creation</td>
 </tr>
-<tr class="row-even"><td>Derek Deeter</td>
-<td><a class="reference external" href="mailto:ddeeter&#37;&#52;&#48;apache&#46;org">ddeeter<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-odd"><td>Derek Deeter</td>
+<td>ddeeter at apache.org</td>
 <td>ddeeter</td>
 <td>Vanderbilt University</td>
 <td>PMC Member</td>
-</tr>
-<tr class="row-odd"><td>Denes Arvay</td>
-<td><a class="reference external" href="mailto:denes&#37;&#52;&#48;apache&#46;org">denes<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>denes</td>
-<td>Cloudera</td>
-<td>PMC Member</td>
+<td>Last active - project creation</td>
 </tr>
 <tr class="row-even"><td>Eric Sammer</td>
-<td><a class="reference external" href="mailto:esammer&#37;&#52;&#48;apache&#46;org">esammer<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>esammer at apache.org</td>
 <td>esammer</td>
 <td>Splunk</td>
 <td>PMC Member</td>
+<td>Last active 2017</td>
 </tr>
 <tr class="row-odd"><td>Hari Shreedharan</td>
-<td><a class="reference external" href="mailto:hshreedharan&#37;&#52;&#48;apache&#46;org">hshreedharan<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>hshreedharan at apache.org</td>
 <td>hshreedharan</td>
 <td>StreamSets</td>
 <td>PMC Member</td>
+<td>Emeritus 2022</td>
 </tr>
 <tr class="row-even"><td>Henry Robinson</td>
-<td><a class="reference external" href="mailto:henry&#37;&#52;&#48;apache&#46;org">henry<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>henry at apache.org</td>
 <td>henry</td>
 <td>Cloudera</td>
 <td>PMC Member</td>
+<td>Last active - project creation</td>
 </tr>
-<tr class="row-odd"><td>Jaroslav Cecho</td>
-<td><a class="reference external" href="mailto:jarcec&#37;&#52;&#48;apache&#46;org">jarcec<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>jarcec</td>
-<td>StreamSets</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-even"><td>Johny Rufus</td>
-<td><a class="reference external" href="mailto:johnyrufus&#37;&#52;&#48;apache&#46;org">johnyrufus<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-odd"><td>Johny Rufus</td>
+<td>johnyrufus at apache.org</td>
 <td>johnyrufus</td>
 <td>Microsoft</td>
 <td>Committer</td>
-</tr>
-<tr class="row-odd"><td>Jonathan Hsieh</td>
-<td><a class="reference external" href="mailto:jmhsieh&#37;&#52;&#48;apache&#46;org">jmhsieh<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>jmhsieh</td>
-<td>Cloudera</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-even"><td>Juhani Connolly</td>
-<td><a class="reference external" href="mailto:juhanic&#37;&#52;&#48;apache&#46;org">juhanic<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>juhanic</td>
-<td>CyberAgent</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-odd"><td>Mike Percy</td>
-<td><a class="reference external" href="mailto:mpercy&#37;&#52;&#48;apache&#46;org">mpercy<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>mpercy</td>
-<td>Cloudera</td>
-<td>PMC Member</td>
+<td>Last active 2017</td>
 </tr>
 <tr class="row-even"><td>Mingjie Lai</td>
-<td><a class="reference external" href="mailto:mlai&#37;&#52;&#48;apache&#46;org">mlai<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>mlai at apache.org</td>
 <td>mlai</td>
 <td>Apple</td>
 <td>PMC Member</td>
+<td>Last active 2012</td>
 </tr>
 <tr class="row-odd"><td>Mubarak Seyed</td>
-<td><a class="reference external" href="mailto:mubarak&#37;&#52;&#48;apache&#46;org">mubarak<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>mubarak at apache.org</td>
 <td>mubarak</td>
 <td>Apple</td>
 <td>Committer</td>
+<td>Last active 2017</td>
 </tr>
 <tr class="row-even"><td>Nick Verbeck</td>
-<td><a class="reference external" href="mailto:nerdynick&#37;&#52;&#48;apache&#46;org">nerdynick<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>nerdynick at apache.org</td>
 <td>nerdynick</td>
 <td>&nbsp;</td>
 <td>PMC Member</td>
+<td>Last active 2011</td>
 </tr>
 <tr class="row-odd"><td>Patrick Hunt</td>
-<td><a class="reference external" href="mailto:phunt&#37;&#52;&#48;apache&#46;org">phunt<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>phunt at apache.org</td>
 <td>phunt</td>
 <td>Cloudera</td>
 <td>PMC Member</td>
+<td>Last active 2012</td>
 </tr>
 <tr class="row-even"><td>Patrick Wendell</td>
-<td><a class="reference external" href="mailto:pwendell&#37;&#52;&#48;apache&#46;org">pwendell<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>pwendell at apache.org</td>
 <td>pwendell</td>
 <td>Databricks</td>
 <td>Committer</td>
+<td>Last active 2015</td>
 </tr>
 <tr class="row-odd"><td>Prasad Mujumdar</td>
-<td><a class="reference external" href="mailto:prasadm&#37;&#52;&#48;apache&#46;org">prasadm<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<td>prasadm at apache.org</td>
 <td>prasadm</td>
 <td>BlueTalon</td>
 <td>PMC Member</td>
+<td>Last active 2015</td>
 </tr>
-<tr class="row-even"><td>Ralph Goers</td>
-<td><a class="reference external" href="mailto:rgoers&#37;&#52;&#48;apache&#46;org">rgoers<span>&#64;</span>apache<span>&#46;</span>org</a></td>
-<td>rgoers</td>
-<td>Nextiva</td>
-<td>PMC Member</td>
-</tr>
-<tr class="row-odd"><td>Roshan Naik</td>
-<td><a class="reference external" href="mailto:roshannaik&#37;&#52;&#48;apache&#46;org">roshannaik<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-even"><td>Roshan Naik</td>
+<td>roshannaik at apache.org</td>
 <td>roshannaik</td>
 <td>Hortonworks</td>
 <td>PMC Member</td>
+<td>Last active 2017</td>
 </tr>
-<tr class="row-even"><td>Attila Simon</td>
-<td><a class="reference external" href="mailto:sati&#37;&#52;&#48;apache&#46;org">sati<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-odd"><td>Attila Simon</td>
+<td>sati at apache.org</td>
 <td>sati</td>
 <td>RapidMiner</td>
 <td>Committer</td>
+<td>Last active 2017</td>
 </tr>
-<tr class="row-odd"><td>Ferenc Szabo</td>
-<td><a class="reference external" href="mailto:szaboferee&#37;&#52;&#48;apache&#46;org">szaboferee<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-even"><td>Ferenc Szabo</td>
+<td>szaboferee at apache.org</td>
 <td>szaboferee</td>
 <td>Cloudera</td>
 <td>Committer</td>
+<td>Last active 2019</td>
 </tr>
-<tr class="row-even"><td>Wolfgang Hoschek</td>
-<td><a class="reference external" href="mailto:whoschek&#37;&#52;&#48;apache&#46;org">whoschek<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-odd"><td>Wolfgang Hoschek</td>
+<td>whoschek at apache.org</td>
 <td>whoschek</td>
 <td>Cloudera</td>
 <td>Committer</td>
+<td>Last active 2016</td>
 </tr>
-<tr class="row-odd"><td>Will McQueen</td>
-<td><a class="reference external" href="mailto:will&#37;&#52;&#48;apache&#46;org">will<span>&#64;</span>apache<span>&#46;</span>org</a></td>
+<tr class="row-even"><td>Will McQueen</td>
+<td>will at apache.org</td>
 <td>will</td>
 <td>&nbsp;</td>
 <td>PMC Member</td>
+<td>Last active 2017</td>
 </tr>
 </tbody>
 </table>
@@ -315,7 +364,7 @@ to the project are greatly appreciated.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/content/testing.html b/content/testing.html
index 5ec37a1..430e7de 100644
--- a/content/testing.html
+++ b/content/testing.html
@@ -109,7 +109,7 @@ and all Pull Requests at GitHub.</p>
       <div class="clearer"></div>
     </div>
 <div class="footer">
-    &copy; Copyright 2009-2021 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
+    &copy; Copyright 2009-2022 The Apache Software Foundation. Apache Flume, Flume, Apache, the Apache feather logo, and the Apache Flume project logo are trademarks of The Apache Software Foundation..
 </div>
   </body>
 </html>
\ No newline at end of file
diff --git a/source/sphinx/FlumeDeveloperGuide.rst b/source/sphinx/FlumeDeveloperGuide.rst
index 5e98bd3..f383cda 100644
--- a/source/sphinx/FlumeDeveloperGuide.rst
+++ b/source/sphinx/FlumeDeveloperGuide.rst
@@ -15,7 +15,7 @@
 
 
 ======================================
-Flume 1.9.0 Developer Guide
+Flume 1.10.0 Developer Guide
 ======================================
 
 Introduction
@@ -866,3 +866,93 @@ Channel
 ~~~~~~~
 
 TBD
+
+Initializable
+~~~~~~~~~~~~~
+
+As of Flume 1.10.0 Sources, Sinks, and Channels may implement the Initializable interface. Doing so
+allows the component to have access to the materialized configuration before any of the components have
+been started.
+
+This example shows a Sink being configured with the name of a Source. While initializing it will
+retrieve the Source from the configuration and save it. During event processing a new event will be
+sent to the Source, presumably after the event has been modified in some way.
+
+.. code-block:: java
+
+  public class NullInitSink extends NullSink implements Initializable {
+
+    private static final Logger logger = LoggerFactory.getLogger(NullInitSink.class);
+    private String sourceName = null;
+    private EventProcessor eventProcessor = null;
+    private long total = 0;
+
+    public NullInitSink() {
+      super();
+    }
+
+    @Override
+    public void configure(Context context) {
+      sourceName = context.getString("targetSource");
+      super.configure(context);
+
+    }
+
+    @Override
+    public void initialize(MaterializedConfiguration configuration) {
+      logger.debug("Locating source for event publishing");
+      for (Map.Entry<String, SourceRunner>  entry : configuration.getSourceRunners().entrySet()) {
+        if (entry.getKey().equals(sourceName)) {
+          Source source = entry.getValue().getSource();
+          if (source instanceof EventProcessor) {
+            eventProcessor = (EventProcessor) source;
+            logger.debug("Found event processor {}", source.getName());
+            return;
+          }
+        }
+      }
+      logger.warn("No Source named {} found for republishing events.", sourceName);
+    }
+
+    @Override
+    public Status process() throws EventDeliveryException {
+      Status status = Status.READY;
+
+      Channel channel = getChannel();
+      Transaction transaction = channel.getTransaction();
+      Event event = null;
+      CounterGroup counterGroup = getCounterGroup();
+      long batchSize = getBatchSize();
+      long eventCounter = counterGroup.get("events.success");
+
+      try {
+        transaction.begin();
+        int i = 0;
+        for (i = 0; i < batchSize; i++) {
+          event = channel.take();
+          if (event != null) {
+            long id = Long.parseLong(new String(event.getBody()));
+            total += id;
+            event.getHeaders().put("Total", Long.toString(total));
+            eventProcessor.processEvent(event);
+            logger.info("Null sink {} successful processed event {}", getName(), id);
+          } else {
+            status = Status.BACKOFF;
+            break;
+          }
+        }
+        transaction.commit();
+        counterGroup.addAndGet("events.success", (long) Math.min(batchSize, i));
+        counterGroup.incrementAndGet("transaction.success");
+      } catch (Exception ex) {
+        transaction.rollback();
+        counterGroup.incrementAndGet("transaction.failed");
+        logger.error("Failed to deliver event. Exception follows.", ex);
+        throw new EventDeliveryException("Failed to deliver event: " + event, ex);
+      } finally {
+        transaction.close();
+      }
+
+      return status;
+    }
+  }
\ No newline at end of file
diff --git a/source/sphinx/FlumeUserGuide.rst b/source/sphinx/FlumeUserGuide.rst
index b740507..7d7b3fd 100644
--- a/source/sphinx/FlumeUserGuide.rst
+++ b/source/sphinx/FlumeUserGuide.rst
@@ -14,9 +14,9 @@
    limitations under the License.
 
 
-===============================
-Flume 1.9.0 User Guide
-===============================
+================================
+Flume 1.10.0 User Guide
+================================
 
 Introduction
 ============
@@ -109,18 +109,20 @@ There's also a memory channel which simply stores the events in an in-memory
 queue, which is faster but any events still left in the memory channel when an
 agent process dies can't be recovered.
 
+Flume's `KafkaChannel` uses Apache Kafka to stage events. Using a replicated
+Kafka topic as a channel helps avoid event loss in case of a disk failure.
+
 Setup
 =====
 
 Setting up an agent
 -------------------
 
-Flume agent configuration is stored in a local configuration file.  This is a
-text file that follows the Java properties file format.
-Configurations for one or more agents can be specified in the same
-configuration file. The configuration file includes properties of each source,
-sink and channel in an agent and how they are wired together to form data
-flows.
+Flume agent configuration is stored in one or more configuration files that
+follow the Java properties file format. Configurations for one or more agents
+can be specified in these configuration files. The configuration includes
+properties of each source, sink and channel in an agent and how they are wired
+together to form data flows.
 
 Configuring individual components
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -222,30 +224,110 @@ The original Flume terminal will output the event in a log message.
 
 Congratulations - you've successfully configured and deployed a Flume agent! Subsequent sections cover agent configuration in much more detail.
 
-Using environment variables in configuration files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Configuration from URIs
+~~~~~~~~~~~~~~~~~~~~~~~
+As of version 1.10.0 Flume supports being configured using URIs instead of just from local files. Direct support
+for HTTP(S), file, and classpath URIs is included. The HTTP support includes support for authentication using
+basic authorization but other authorization mechanisms may be supported by specifying the fully qualified name
+of the class that implements the AuthorizationProvider interface using the --auth-provider option. HTTP also
+supports reloading of configuration files using polling if the target server properly responds to the If-Modified-Since
+header.
+
+To specify credentials for HTTP authentication add::
+
+  --conf-user userid --conf-password password
+
+to the startup command.
+
+Multiple Configuration Files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+As of version 1.10.0 Flume supports being configured from multiple configuration files instead of just one.
+This more easily allows values to be overridden or added based on specific environments. Each file should
+be configured using its own --conf-file or --conf-uri option. However, all files should either be provided
+with --conf-file or with --conf-uri. If --conf-file and --conf-uri appear together as options all --conf-uri
+configurations will be processed before any of the --conf-file configurations are merged.
+
+For example, a configuration of::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --conf-uri http://localhost:80/flume.conf --conf-uri http://localhost:80/override.conf --name a1 -Dflume.root.logger=INFO,console
+
+will cause flume.conf to be read first, override.conf to be merged with it and finally example.conf will be
+merged last. If it is desired to have example.conf be the base configuration it should be specified using the
+--conf-uri option either as::
+
+  --conf-uri classpath://example.conf
+  or
+  --conf-uri file:///example.conf
+
+depending on how it should be accessed.
+
+Using environment variables, system properties, or other properties configuration files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Flume has the ability to substitute environment variables in the configuration. For example::
 
   a1.sources = r1
   a1.sources.r1.type = netcat
   a1.sources.r1.bind = 0.0.0.0
-  a1.sources.r1.port = ${NC_PORT}
+  a1.sources.r1.port = ${env:NC_PORT}
   a1.sources.r1.channels = c1
 
 NB: it currently works for values only, not for keys. (Ie. only on the "right side" of the `=` mark of the config lines.)
 
-This can be enabled via Java system properties on agent invocation by setting `propertiesImplementation = org.apache.flume.node.EnvVarResolverProperties`.
+As of version 1.10.0 Flume resolves configuration values using Apache Commons Text's StringSubstitutor
+class using the default set of Lookups along with a lookup that uses the configuration files as a
+source for replacement values.
 
 For example::
-  $ NC_PORT=44444 bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console -DpropertiesImplementation=org.apache.flume.node.EnvVarResolverProperties
+  $ NC_PORT=44444 bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console
 
 Note the above is just an example, environment variables can be configured in other ways, including being set in `conf/flume-env.sh`.
 
+As noted, system properties are also supported, so the configuration::
+
+  a1.sources = r1
+  a1.sources.r1.type = netcat
+  a1.sources.r1.bind = 0.0.0.0
+  a1.sources.r1.port = ${sys:NC_PORT}
+  a1.sources.r1.channels = c1
+
+could be used and the startup command could be::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console -DNC_PORT=44444
+
+Furthermore, because multiple configuration files are allowed the first file could contain::
+
+  a1.sources = r1
+  a1.sources.r1.type = netcat
+  a1.sources.r1.bind = 0.0.0.0
+  a1.sources.r1.port = ${NC_PORT}
+  a1.sources.r1.channels = c1
+
+and the override file could contain::
+
+  NC_PORT = 44444
+
+In this case the startup command could be::
+
+  $ bin/flume-ng agent --conf conf --conf-file example.conf --conf-file override.conf --name a1 -Dflume.root.logger=INFO,console
+
+Note that the method for specifying environment variables as was done in prior versions will still work
+but has been deprecated in favor of using ${env:varName}.
+
+Using a command options file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Instead of specifying all the command options on the command line as of version 1.10.0 command
+options may be placed in either /etc/flume/flume.opts or flume.opts on the classpath. An example
+might be::
+
+  conf-file = example.conf
+  conf-file = override.conf
+  name = a1
+
 Logging raw data
 ~~~~~~~~~~~~~~~~
 
 
-Logging the raw stream of data flowing through the ingest pipeline is not desired behaviour in
+Logging the raw stream of data flowing through the ingest pipeline is not desired behavior in
 many production environments because this may result in leaking sensitive data or security related
 configurations, such as secret keys, to Flume log files.
 By default, Flume will not log such information. On the other hand, if the data pipeline is broken,
@@ -487,10 +569,10 @@ component:
   <Agent>.sources.<Source>.<someProperty> = <someValue>
 
   # properties for channels
-  <Agent>.channel.<Channel>.<someProperty> = <someValue>
+  <Agent>.channels.<Channel>.<someProperty> = <someValue>
 
   # properties for sinks
-  <Agent>.sources.<Sink>.<someProperty> = <someValue>
+  <Agent>.sinks.<Sink>.<someProperty> = <someValue>
 
 The property "type" needs to be set for each component for Flume to understand
 what kind of object it needs to be. Each source, sink and channel type has its
@@ -539,7 +621,7 @@ linked to form multiple flows:
   <Agent>.channels = <Channel1> <Channel2>
 
 Then you can link the sources and sinks to their corresponding channels (for
-sources) of channel (for sinks) to setup two different flows. For example, if
+sources) or channel (for sinks) to setup two different flows. For example, if
 you need to setup two flows in an agent, one going from an external avro client
 to external HDFS and another from output of a tail to avro sink, then here's a
 config to do that:
@@ -1248,7 +1330,7 @@ recursiveDirectorySearch  false           Whether to monitor sub directories for
 maxBackoff                4000            The maximum time (in millis) to wait between consecutive attempts to
                                           write to the channel(s) if the channel is full. The source will start at
                                           a low backoff and increase it exponentially each time the channel throws a
-                                          ChannelException, upto the value specified by this parameter.
+                                          ChannelException, up to the value specified by this parameter.
 batchSize                 100             Granularity at which to batch transfer to the channel
 inputCharset              UTF-8           Character set used by deserializers that treat the input file as text.
 decodeErrorPolicy         ``FAIL``        What to do when we see a non-decodable character in the input file.
@@ -1260,7 +1342,7 @@ deserializer              ``LINE``        Specify the deserializer used to parse
                                           Defaults to parsing each line as an event. The class specified must implement
                                           ``EventDeserializer.Builder``.
 deserializer.*                            Varies per event deserializer.
-bufferMaxLines            --              (Obselete) This option is now ignored.
+bufferMaxLines            --              (Obsolete) This option is now ignored.
 bufferMaxLineLength       5000            (Deprecated) Maximum length of a line in the commit buffer. Use deserializer.maxLineLength instead.
 selector.type             replicating     replicating or multiplexing
 selector.*                                Depends on the selector.type value
@@ -1412,7 +1494,7 @@ Twitter 1% firehose Source (experimental)
   Use at your own risk.
 
 Experimental source that connects via Streaming API to the 1% sample twitter
-firehose, continously downloads tweets, converts them to Avro format and
+firehose, continuously downloads tweets, converts them to Avro format and
 sends Avro events to a downstream Flume sink. Requires the consumer and
 access tokens and secrets of a Twitter developer account.
 Required properties are in **bold**.
@@ -1460,7 +1542,7 @@ Property Name                       Default      Description
 **kafka.bootstrap.servers**         --           List of brokers in the Kafka cluster used by the source
 kafka.consumer.group.id             flume        Unique identified of consumer group. Setting the same id in multiple sources or agents
                                                  indicates that they are part of the same consumer group
-**kafka.topics**                    --           Comma-separated list of topics the kafka consumer will read messages from.
+**kafka.topics**                    --           Comma-separated list of topics the Kafka consumer will read messages from.
 **kafka.topics.regex**              --           Regex that defines set of topics the source is subscribed on. This property has higher priority
                                                  than ``kafka.topics`` and overrides ``kafka.topics`` if exists.
 batchSize                           1000         Maximum number of messages written to Channel in one batch
@@ -1505,8 +1587,8 @@ Property Name                    Default              Description
 ===============================  ===================  ================================================================================================
 topic                            --                   Use kafka.topics
 groupId                          flume                Use kafka.consumer.group.id
-zookeeperConnect                 --                   Is no longer supported by kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
-                                                      to establish connection with kafka cluster
+zookeeperConnect                 --                   Is no longer supported by Kafka consumer client since 0.9.x. Use kafka.bootstrap.servers
+                                                      to establish connection with Kafka cluster
 migrateZookeeperOffsets          true                 When no Kafka stored offset is found, look up the offsets in Zookeeper and commit them to Kafka.
                                                       This should be true to support seamless Kafka client migration from older versions of Flume.
                                                       Once migrated this can be set to false, though that should generally not be required.
@@ -1579,7 +1661,7 @@ Example configuration with server side authentication and data encryption.
     a1.sources.source1.kafka.consumer.ssl.truststore.location=/path/to/truststore.jks
     a1.sources.source1.kafka.consumer.ssl.truststore.password=<password to access the truststore>
 
-Specyfing the truststore is optional here, the global truststore can be used instead.
+Specifying the truststore is optional here, the global truststore can be used instead.
 For more details about the global SSL setup, see the `SSL/TLS support`_ section.
 
 Note: By default the property ``ssl.endpoint.identification.algorithm``
@@ -2416,10 +2498,12 @@ serializer.*
 
 Deprecated Properties
 
+======================  ============  ======================================================================================
 Name                    Default       Description
-======================  ============  ======================================================================
-hdfs.callTimeout        30000         Number of milliseconds allowed for HDFS operations, such as open, write, flush, close. This number should be increased if many HDFS timeout operations are occurring.
-======================  ============  ======================================================================
+======================  ============  ======================================================================================
+hdfs.callTimeout        30000         Number of milliseconds allowed for HDFS operations, such as open, write, flush, close.
+                                      This number should be increased if many HDFS timeout operations are occurring.
+======================  ============  ======================================================================================
 
 Example for agent named a1:
 
@@ -2429,7 +2513,7 @@ Example for agent named a1:
   a1.sinks = k1
   a1.sinks.k1.type = hdfs
   a1.sinks.k1.channel = c1
-  a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/%H%M/%S
+  a1.sinks.k1.hdfs.path = /flume/events/%Y-%m-%d/%H%M/%S
   a1.sinks.k1.hdfs.filePrefix = events-
   a1.sinks.k1.hdfs.round = true
   a1.sinks.k1.hdfs.roundValue = 10
@@ -2564,7 +2648,7 @@ Example for agent named a1:
  a1.sinks.k1.hive.metastore = thrift://127.0.0.1:9083
  a1.sinks.k1.hive.database = logsdb
  a1.sinks.k1.hive.table = weblogs
- a1.sinks.k1.hive.partition = asia,%{country},%y-%m-%d-%H-%M
+ a1.sinks.k1.hive.partition = asia,%{country},%Y-%m-%d-%H-%M
  a1.sinks.k1.useLocalTimeStamp = false
  a1.sinks.k1.round = true
  a1.sinks.k1.roundValue = 10
@@ -2997,74 +3081,6 @@ Example for agent named a1:
   # a1.sinks.k1.batchSize = 1000
   # a1.sinks.k1.batchDurationMillis = 1000
 
-ElasticSearchSink
-~~~~~~~~~~~~~~~~~
-
-This sink writes data to an elasticsearch cluster. By default, events will be written so that the `Kibana <http://kibana.org>`_ graphical interface
-can display them - just as if `logstash <https://logstash.net>`_ wrote them.
-
-The elasticsearch and lucene-core jars required for your environment must be placed in the lib directory of the Apache Flume installation.
-Elasticsearch requires that the major version of the client JAR match that of the server and that both are running the same minor version
-of the JVM. SerializationExceptions will appear if this is incorrect. To
-select the required version first determine the version of elasticsearch and the JVM version the target cluster is running. Then select an elasticsearch client
-library which matches the major version. A 0.19.x client can talk to a 0.19.x cluster; 0.20.x can talk to 0.20.x and 0.90.x can talk to 0.90.x. Once the
-elasticsearch version has been determined then read the pom.xml file to determine the correct lucene-core JAR version to use. The Flume agent
-which is running the ElasticSearchSink should also match the JVM the target cluster is running down to the minor version.
-
-Events will be written to a new index every day. The name will be <indexName>-yyyy-MM-dd where <indexName> is the indexName parameter. The sink
-will start writing to a new index at midnight UTC.
-
-Events are serialized for elasticsearch by the ElasticSearchLogStashEventSerializer by default. This behaviour can be
-overridden with the serializer parameter. This parameter accepts implementations of org.apache.flume.sink.elasticsearch.ElasticSearchEventSerializer
-or org.apache.flume.sink.elasticsearch.ElasticSearchIndexRequestBuilderFactory. Implementing ElasticSearchEventSerializer is deprecated in favour of
-the more powerful ElasticSearchIndexRequestBuilderFactory.
-
-The type is the FQCN: org.apache.flume.sink.elasticsearch.ElasticSearchSink
-
-Required properties are in **bold**.
-
-================  ======================================================================== =======================================================================================================
-Property Name     Default                                                                  Description
-================  ======================================================================== =======================================================================================================
-**channel**       --
-**type**          --                                                                       The component type name, needs to be ``org.apache.flume.sink.elasticsearch.ElasticSearchSink``
-**hostNames**     --                                                                       Comma separated list of hostname:port, if the port is not present the default port '9300' will be used
-indexName         flume                                                                    The name of the index which the date will be appended to. Example 'flume' -> 'flume-yyyy-MM-dd'
-                                                                                           Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header
-indexType         logs                                                                     The type to index the document to, defaults to 'log'
-                                                                                           Arbitrary header substitution is supported, eg. %{header} replaces with value of named event header
-clusterName       elasticsearch                                                            Name of the ElasticSearch cluster to connect to
-batchSize         100                                                                      Number of events to be written per txn.
-ttl               --                                                                       TTL in days, when set will cause the expired documents to be deleted automatically,
-                                                                                           if not set documents will never be automatically deleted. TTL is accepted both in the earlier form of
-                                                                                           integer only e.g. a1.sinks.k1.ttl = 5 and also with a qualifier ms (millisecond), s (second), m (minute),
-                                                                                           h (hour), d (day) and w (week). Example a1.sinks.k1.ttl = 5d will set TTL to 5 days. Follow
-                                                                                           http://www.elasticsearch.org/guide/reference/mapping/ttl-field/ for more information.
-serializer        org.apache.flume.sink.elasticsearch.ElasticSearchLogStashEventSerializer The ElasticSearchIndexRequestBuilderFactory or ElasticSearchEventSerializer to use. Implementations of
-                                                                                           either class are accepted but ElasticSearchIndexRequestBuilderFactory is preferred.
-serializer.*      --                                                                       Properties to be passed to the serializer.
-================  ======================================================================== =======================================================================================================
-
-.. note:: Header substitution is a handy to use the value of an event header to dynamically decide the indexName and indexType to use when storing the event.
-          Caution should be used in using this feature as the event submitter now has control of the indexName and indexType.
-          Furthermore, if the elasticsearch REST client is used then the event submitter has control of the URL path used.
-
-Example for agent named a1:
-
-.. code-block:: properties
-
-  a1.channels = c1
-  a1.sinks = k1
-  a1.sinks.k1.type = elasticsearch
-  a1.sinks.k1.hostNames = 127.0.0.1:9200,127.0.0.2:9300
-  a1.sinks.k1.indexName = foo_index
-  a1.sinks.k1.indexType = bar_type
-  a1.sinks.k1.clusterName = foobar_cluster
-  a1.sinks.k1.batchSize = 500
-  a1.sinks.k1.ttl = 5d
-  a1.sinks.k1.serializer = org.apache.flume.sink.elasticsearch.ElasticSearchDynamicSerializer
-  a1.sinks.k1.channel = c1
-
 Kite Dataset Sink
 ~~~~~~~~~~~~~~~~~
 
@@ -4037,6 +4053,29 @@ In the above configuration, c3 is an optional channel. Failure to write to c3 is
 simply ignored. Since c1 and c2 are not marked optional, failure to write to
 those channels will cause the transaction to fail.
 
+Load Balancing Channel Selector
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Load balancing channel selector provides the ability to load-balance flow over multiple channels. This
+effectively allows the incoming data to be processed on multiple threads. It maintains an indexed list of active channels on which the load must be distributed. Implementation supports distributing load using either round_robin or random selection mechanisms. The choice of selection mechanism defaults to round_robin type, but can be overridden via configuration.
+
+Required properties are in **bold**.
+
+==================  =====================  =================================================
+Property Name       Default                Description
+==================  =====================  =================================================
+selector.type       replicating            The component type name, needs to be ``load_balancing``
+selector.policy     ``round_robin``        Selection mechanism. Must be either ``round_robin`` or ``random``.
+==================  =====================  =================================================
+
+Example for agent named a1 and its source called r1:
+
+.. code-block:: properties
+
+  a1.sources = r1
+  a1.channels = c1 c2 c3 c4
+  a1.sources.r1.channels = c1 c2 c3 c4
+  a1.sources.r1.selector.type = load_balancing
+  a1.sources.r1.selector.policy = round_robin
 
 Multiplexing Channel Selector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -4279,7 +4318,7 @@ Example for agent named a1:
 
   a1.sinks.k1.type = hdfs
   a1.sinks.k1.channel = c1
-  a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/%H%M/%S
+  a1.sinks.k1.hdfs.path = /flume/events/%Y-%m-%d/%H%M/%S
   a1.sinks.k1.serializer = avro_event
   a1.sinks.k1.serializer.compressionCodec = snappy
 
@@ -4805,7 +4844,7 @@ Log4J Appender
 
 Appends Log4j events to a flume agent's avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar).
+flume-ng-sdk-1.10.0.jar).
 Required properties are in **bold**.
 
 =====================  =======  ==================================================================================
@@ -4869,7 +4908,7 @@ Load Balancing Log4J Appender
 
 Appends Log4j events to a list of flume agent's avro source. A client using this
 appender must have the flume-ng-sdk in the classpath (eg,
-flume-ng-sdk-1.9.0.jar). This appender supports a round-robin and random
+flume-ng-sdk-1.10.0.jar). This appender supports a round-robin and random
 scheme for performing the load balancing. It also supports a configurable backoff
 timeout so that down agents are removed temporarily from the set of hosts
 Required properties are in **bold**.
@@ -5028,33 +5067,33 @@ Sources 2
 Sinks 1
 ~~~~~~~
 
-+------------------------+-------------+------------+---------------+-------+--------+
-|                        | Avro/Thrift | AsyncHBase | ElasticSearch | HBase | HBase2 |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchCompleteCount     | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchEmptyCount        | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| BatchUnderflowCount    | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ChannelReadFail        | x           |            |               |       | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionClosedCount  | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionCreatedCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| ConnectionFailedCount  | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventDrainAttemptCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventDrainSuccessCount | x           | x          | x             | x     | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| EventWriteFail         | x           |            |               |       | x      |
-+------------------------+-------------+------------+---------------+-------+--------+
-| KafkaEventSendTimer    |             |            |               |       |        |
-+------------------------+-------------+------------+---------------+-------+--------+
-| RollbackCount          |             |            |               |       |        |
-+------------------------+-------------+------------+---------------+-------+--------+
++------------------------+-------------+------------+-------+--------+
+|                        | Avro/Thrift | AsyncHBase | HBase | HBase2 |
++------------------------+-------------+------------+-------+--------+
+| BatchCompleteCount     | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchEmptyCount        | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| BatchUnderflowCount    | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ChannelReadFail        | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionClosedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionCreatedCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| ConnectionFailedCount  | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainAttemptCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventDrainSuccessCount | x           | x          | x     | x      |
++------------------------+-------------+------------+-------+--------+
+| EventWriteFail         | x           |            |       | x      |
++------------------------+-------------+------------+-------+--------+
+| KafkaEventSendTimer    |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
+| RollbackCount          |             |            |       |        |
++------------------------+-------------+------------+-------+--------+
 
 Sinks 2
 ~~~~~~~
@@ -5540,7 +5579,6 @@ org.apache.flume.Sink                                         hdfs
 org.apache.flume.Sink                                         hbase                   org.apache.flume.sink.hbase.HBaseSink
 org.apache.flume.Sink                                         hbase2                  org.apache.flume.sink.hbase2.HBase2Sink
 org.apache.flume.Sink                                         asynchbase              org.apache.flume.sink.hbase.AsyncHBaseSink
-org.apache.flume.Sink                                         elasticsearch           org.apache.flume.sink.elasticsearch.ElasticSearchSink
 org.apache.flume.Sink                                         file_roll               org.apache.flume.sink.RollingFileSink
 org.apache.flume.Sink                                         irc                     org.apache.flume.sink.irc.IRCSink
 org.apache.flume.Sink                                         thrift                  org.apache.flume.sink.ThriftSink
diff --git a/source/sphinx/download.rst b/source/sphinx/download.rst
index 2cffa8d..b72626a 100644
--- a/source/sphinx/download.rst
+++ b/source/sphinx/download.rst
@@ -10,8 +10,8 @@ originals on the main distribution server.
 
 .. csv-table::
 
-   "Apache Flume binary (tar.gz)",  `apache-flume-1.9.0-bin.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz>`_, `apache-flume-1.9.0-bin.tar.gz.sha512 <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.sha512>`_, `apache-flume-1.9.0-bin.tar.gz.asc <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz.asc>`_
-  "Apache Flume source (tar.gz)",  `apache-flume-1.9.0-src.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.9.0/apache-flume-1.9.0-src.tar.gz>`_, `apache-flume-1.9.0-src.tar.gz.sha512 <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.sha512>`_, `apache-flume-1.9.0-src.tar.gz.asc <http://www.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-src.tar.gz.asc>`_
+   "Apache Flume binary (tar.gz)",  `apache-flume-1.10.0-bin.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz>`_, `apache-flume-1.10.0-bin.tar.gz.sha512 <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.sha512>`_, `apache-flume-1.10.0-bin.tar.gz.asc <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-bin.tar.gz.asc>`_
+  "Apache Flume source (tar.gz)",  `apache-flume-1.10.0-src.tar.gz <http://www.apache.org/dyn/closer.lua/flume/1.10.0/apache-flume-1.10.0-src.tar.gz>`_, `apache-flume-1.10.0-src.tar.gz.sha512 <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.sha512>`_, `apache-flume-1.10.0-src.tar.gz.asc <http://www.apache.org/dist/flume/1.10.0/apache-flume-1.10.0-src.tar.gz.asc>`_
 
 It is essential that you verify the integrity of the downloaded files using the PGP or MD5 signatures. Please read
 `Verifying Apache HTTP Server Releases <http://httpd.apache.org/dev/verification.html>`_ for more information on
@@ -23,9 +23,9 @@ as well as the asc signature file for the relevant distribution. Make sure you g
 Then verify the signatures using::
 
     % gpg --import KEYS
-    % gpg --verify apache-flume-1.9.0-src.tar.gz.asc
+    % gpg --verify apache-flume-1.10.0-src.tar.gz.asc
 
-Apache Flume 1.9.0 is signed by Ferenc Szabo 79E8E648
+Apache Flume 1.10.0 is signed by Ralph Goers B3D8E1BA
 
 Alternatively, you can verify the MD5 or SHA1 signatures of the files. A program called md5, md5sum, or shasum is included in many
 Unix distributions for this purpose.
diff --git a/source/sphinx/index.rst b/source/sphinx/index.rst
index b574411..a750734 100644
--- a/source/sphinx/index.rst
+++ b/source/sphinx/index.rst
@@ -31,6 +31,34 @@ application.
 
 .. rubric:: News
 
+.. raw:: html
+
+   <h3>June 5, 2022 - Apache Flume 1.10.0 Released</h3>
+
+The Apache Flume team is pleased to announce the release of Flume 1.10.0.
+
+Flume is a distributed, reliable, and available service for efficiently
+collecting, aggregating, and moving large amounts of streaming event data.
+
+Flume 1.10.0 is stable, production-ready software, and is backwards-compatible with
+previous versions of the Flume 1.x codeline.
+
+This version of Flume upgrades many dependencies, resolving the CVEs associated with them.
+Enhancements included in this release include the addition of a LoadBalancingChannelSelector,
+the ability to retrieve the Flume configuration from a remote source such as a Spring
+Cloud Config Server, and support for composite configurations.
+
+Flume has been updated to use Log4j 2.x instead of Log4j 1.x.
+
+The full change log and documentation are available on the
+`Flume 1.10.0 release page <releases/1.10.0.html>`__.
+
+This release can be downloaded from the Flume `Download <download.html>`__ page.
+
+Your contributions, feedback, help and support make Flume better!
+For more information on how to report problems or contribute,
+please visit our `Get Involved <getinvolved.html>`__ page.
+
 .. raw:: html
 
    <h3>January 8, 2019 - Apache Flume 1.9.0 Released</h3>
diff --git a/source/sphinx/releases/1.10.0.rst b/source/sphinx/releases/1.10.0.rst
new file mode 100644
index 0000000..681b5b1
--- /dev/null
+++ b/source/sphinx/releases/1.10.0.rst
@@ -0,0 +1,69 @@
+===============
+Version 1.10.0
+===============
+
+.. rubric:: Status of this release
+
+Apache Flume 1.10.0 is the twelfth release of Flume as an Apache top-level project
+(TLP). Apache Flume 1.10.0 is production-ready software.
+
+.. rubric:: Release Documentation
+
+* `Flume 1.10.0 User Guide <content/1.10.0/FlumeUserGuide.html>`__ (also in `pdf <content/1.10.0/FlumeUserGuide.pdf>`__)
+* `Flume 1.10.0 Developer Guide <content/1.10.0/FlumeDeveloperGuide.html>`__ (also in `pdf <content/1.10.0/FlumeDeveloperGuide.pdf>`__)
+* `Flume 1.10.0 API Documentation <content/1.10.0/apidocs/index.html>`__
+
+.. rubric:: Changes
+
+Release Notes - Flume - Version v1.10.0
+
+** Bug
+    * [`FLUME-3151 <https://issues.apache.org/jira/browse/FLUME-3151>`__] - Upgrade Hadoop to 2.10.1 
+    * [`FLUME-3311 <https://issues.apache.org/jira/browse/FLUME-3311>`__] - Update Wrong Use In HDFS Sink 
+    * [`FLUME-3316 <https://issues.apache.org/jira/browse/FLUME-3316>`__] - Syslog Rfc3164Date test fails when the test date falls on a leap day 
+    * [`FLUME-3328 <https://issues.apache.org/jira/browse/FLUME-3328>`__] - Fix Deprecated Properties table of HDFS Sink 
+    * [`FLUME-3356 <https://issues.apache.org/jira/browse/FLUME-3356>`__] - Probable security issue in Flume 
+    * [`FLUME-3360 <https://issues.apache.org/jira/browse/FLUME-3360>`__] - Maven assemble failed on macOS 
+    * [`FLUME-3395 <https://issues.apache.org/jira/browse/FLUME-3395>`__] - Fix for CVE-2021-44228 
+    * [`FLUME-3407 <https://issues.apache.org/jira/browse/FLUME-3407>`__] - workaround for jackson-mapper-asl-1.9.13.jar  @ flume-ng 
+    * [`FLUME-3409 <https://issues.apache.org/jira/browse/FLUME-3409>`__] - upgrade httpclient due to cve 
+    * [`FLUME-3416 <https://issues.apache.org/jira/browse/FLUME-3416>`__] - Improve input validation 
+    * [`FLUME-3421 <https://issues.apache.org/jira/browse/FLUME-3421>`__] - Default log4j settings do not log to console after FLUME-2050 
+    * [`FLUME-3426 <https://issues.apache.org/jira/browse/FLUME-3426>`__] - Unresolved Security Issues 
+
+** New Feature
+    * [`FLUME-3412 <https://issues.apache.org/jira/browse/FLUME-3412>`__] - Add LoadBalancingChannelSelector 
+
+** Improvement
+    * [`FLUME-199 <https://issues.apache.org/jira/browse/FLUME-199>`__] - Unit tests should hunt for available ports if defaults are in use 
+    * [`FLUME-2050 <https://issues.apache.org/jira/browse/FLUME-2050>`__] - Upgrade to log4j2 (when GA) 
+    * [`FLUME-3045 <https://issues.apache.org/jira/browse/FLUME-3045>`__] - Document GitHub Pull Requests in How to Contribute Guide 
+    * [`FLUME-3335 <https://issues.apache.org/jira/browse/FLUME-3335>`__] - Support configuration and reconfiguration via HTTP(S) 
+    * [`FLUME-3338 <https://issues.apache.org/jira/browse/FLUME-3338>`__] - Doc Flume Recoverability with Kafka 
+    * [`FLUME-3363 <https://issues.apache.org/jira/browse/FLUME-3363>`__] - CVE-2019-20445 
+    * [`FLUME-3368 <https://issues.apache.org/jira/browse/FLUME-3368>`__] - Update Jackson to 2.9.10 
+    * [`FLUME-3389 <https://issues.apache.org/jira/browse/FLUME-3389>`__] - Build and test Apache Flume on ARM64 CPU architecture 
    * [`FLUME-3397 <https://issues.apache.org/jira/browse/FLUME-3397>`__] - Upgrade Log4j to 2.17.1 and SLF4J to 1.7.32 
+    * [`FLUME-3398 <https://issues.apache.org/jira/browse/FLUME-3398>`__] - Upgrade Kafka to a supported version. 
+    * [`FLUME-3399 <https://issues.apache.org/jira/browse/FLUME-3399>`__] - Update Jackson to 2.13.1 
+    * [`FLUME-3403 <https://issues.apache.org/jira/browse/FLUME-3403>`__] - The parquet-avro version used by flume is 1.4.1, which is vulnerable.
+    * [`FLUME-3405 <https://issues.apache.org/jira/browse/FLUME-3405>`__] - Reopened - The parquet-avro version used by flume is 1.4.1, which is vulnerable.
+    * [`FLUME-3413 <https://issues.apache.org/jira/browse/FLUME-3413>`__] - Add "initialization" phase to components. 
+
+** Wish
+    * [`FLUME-3400 <https://issues.apache.org/jira/browse/FLUME-3400>`__] - Upgrade commons-io to 2.11.0 
+
+** Task
+    * [`FLUME-3401 <https://issues.apache.org/jira/browse/FLUME-3401>`__] - Remove Kite Dataset Sink 
+    * [`FLUME-3402 <https://issues.apache.org/jira/browse/FLUME-3402>`__] - remove org.codehaus.jackson dependencies 
+    * [`FLUME-3404 <https://issues.apache.org/jira/browse/FLUME-3404>`__] - Update Commons CLI to 1.5.0, Commons Codec to 1.15, Commons Compress to 1.21 and Commons Lang to 2.6 
+    * [`FLUME-3410 <https://issues.apache.org/jira/browse/FLUME-3410>`__] - upgrade hbase version 
+    * [`FLUME-3411 <https://issues.apache.org/jira/browse/FLUME-3411>`__] - upgrade hive sink to 1.2.2 
+    * [`FLUME-3417 <https://issues.apache.org/jira/browse/FLUME-3417>`__] - Remove Elasticsearch sink that requires Elasticsearch 0.90.1 
+    * [`FLUME-3419 <https://issues.apache.org/jira/browse/FLUME-3419>`__] - Review project LICENSE and NOTICE 
+    * [`FLUME-3424 <https://issues.apache.org/jira/browse/FLUME-3424>`__] - Upgrade Twitter4j to version 4.0.7+ 
+
+** Dependency upgrade
+    * [`FLUME-3339 <https://issues.apache.org/jira/browse/FLUME-3339>`__] - Remove Xerces and Xalan dependencies 
+    * [`FLUME-3385 <https://issues.apache.org/jira/browse/FLUME-3385>`__] - flume-ng-sdk uses Avro-IPC version with vulnerable version of Jetty 
+    * [`FLUME-3386 <https://issues.apache.org/jira/browse/FLUME-3386>`__] - flume-ng-sdk uses vulnerable version of netty 
diff --git a/source/sphinx/releases/index.rst b/source/sphinx/releases/index.rst
index e9c353e..7906568 100644
--- a/source/sphinx/releases/index.rst
+++ b/source/sphinx/releases/index.rst
@@ -3,13 +3,13 @@ Releases
 
 .. rubric:: Current Release
 
-The current stable release is `Apache Flume Version 1.9.0 <1.9.0.html>`__.
+The current stable release is `Apache Flume Version 1.10.0 <1.10.0.html>`__.
 
 .. toctree::
    :maxdepth: 1
    :hidden:
 
-   1.9.0
+   1.10.0
 
 .. rubric:: Previous Releases
 
@@ -17,6 +17,7 @@ The current stable release is `Apache Flume Version 1.9.0 <1.9.0.html>`__.
    :maxdepth: 1
    :glob:
 
+   1.9.0
    1.8.0
    1.7.0
    1.6.0
diff --git a/source/sphinx/team.rst b/source/sphinx/team.rst
index 6e5405b..b1a4c63 100644
--- a/source/sphinx/team.rst
+++ b/source/sphinx/team.rst
@@ -10,39 +10,47 @@ Team
  to the Members. The number of Contributors to the project is unbounded. Get involved today. All contributions
  to the project are greatly appreciated.
 
- The following individuals are recognized as PMC Members or Project Committers.
+ The following individuals are recognized as currently active PMC Members or Project Committers.
 
 .. csv-table::
    :header: "Name", "Email", "Id", "Organization", "Role"
    :widths: 30, 25, 15, 15, 15
 
-   "Aaron Kimball", "kimballa@apache.org", "kimballa", "Zymergen", "PMC Member"
-   "Ashish Paliwal", "apaliwal@apache.org", "apaliwal", "Apple", "Committer"
-   "Andrew Bayer", "abayer@apache.org", "abayer", "CloudBees", "PMC Member"
-   "Ahmed Radwan", "ahmed@apache.org", "ahmed", "Apple", "PMC Member"
-   "Arvind Prabhakar", "arvind@apache.org", "arvind", "StreamSets", "PMC Member"
-   "Balázs Donát Bessenyei", "bessbd@apache.org", "bessbd", "Ericsson", "PMC Member"
-   "Brock Noland", "brock@apache.org", "brock", "phData", "PMC Member"
-   "Bruce Mitchener", "brucem@apache.org", "brucem", "Data Fueled", "PMC Member"
-   "Derek Deeter", "ddeeter@apache.org", "ddeeter", "Vanderbilt University", "PMC Member"
-   "Denes Arvay", "denes@apache.org", "denes", "Cloudera", "PMC Member"
-   "Eric Sammer", "esammer@apache.org", "esammer", "Splunk", "PMC Member"
-   "Hari Shreedharan", "hshreedharan@apache.org", "hshreedharan", "StreamSets", "PMC Member"
-   "Henry Robinson", "henry@apache.org", "henry", "Cloudera", "PMC Member"
-   "Jaroslav Cecho", "jarcec@apache.org", "jarcec", "StreamSets", "PMC Member"
-   "Johny Rufus", "johnyrufus@apache.org", "johnyrufus", "Microsoft", "Committer"
-   "Jonathan Hsieh", "jmhsieh@apache.org", "jmhsieh", "Cloudera", "PMC Member"
-   "Juhani Connolly", "juhanic@apache.org", "juhanic", "CyberAgent", "PMC Member"
-   "Mike Percy", "mpercy@apache.org", "mpercy", "Cloudera", "PMC Member"
-   "Mingjie Lai", "mlai@apache.org", "mlai", "Apple", "PMC Member"
-   "Mubarak Seyed", "mubarak@apache.org","mubarak", "Apple", "Committer"
-   "Nick Verbeck", "nerdynick@apache.org", "nerdynick", "", "PMC Member"
-   "Patrick Hunt", "phunt@apache.org", "phunt", "Cloudera", "PMC Member"
-   "Patrick Wendell", "pwendell@apache.org", "pwendell", "Databricks", "Committer"
-   "Prasad Mujumdar", "prasadm@apache.org", "prasadm", "BlueTalon", "PMC Member"
-   "Ralph Goers", "rgoers@apache.org", "rgoers", "Nextiva", "PMC Member"
-   "Roshan Naik", "roshannaik@apache.org", "roshannaik", "Hortonworks", "PMC Member"
-   "Attila Simon", "sati@apache.org", "sati", "RapidMiner", "Committer"
-   "Ferenc Szabo", "szaboferee@apache.org", "szaboferee", "Cloudera", "Committer"
-   "Wolfgang Hoschek", "whoschek@apache.org", "whoschek", "Cloudera", "Committer"
-   "Will McQueen", "will@apache.org", "will", "", "PMC Member"
+   "Arvind Prabhakar", "arvind at apache.org", "arvind", "StreamSets", "PMC Member"
+   "Balázs Donát Bessenyei", "bessbd at apache.org", "bessbd", "Ericsson", "PMC Chair"
+   "Denes Arvay", "denes at apache.org", "denes", "Cloudera", "PMC Member"
+   "Jaroslav Cecho", "jarcec at apache.org", "jarcec", "StreamSets", "PMC Member"
+   "Jonathan Hsieh", "jmhsieh at apache.org", "jmhsieh", "Cloudera", "PMC Member"
+   "Juhani Connolly", "juhanic at apache.org", "juhanic", "CyberAgent", "PMC Member"
+   "Mike Percy", "mpercy at apache.org", "mpercy", "Cloudera", "PMC Member"
+   "Ahmed Radwan", "ahmed at apache.org", "ahmed", "Apple", "PMC Member"
+   "Ralph Goers", "rgoers at apache.org", "rgoers", "Nextiva", "PMC Member"
+   "Tristan Stevens", "tristan at apache.org", "tristan", "Cloudera", "PMC Member"
+
+The following individuals are recognized as former PMC Members or Project Committers.
+
+.. csv-table::
+   :header: "Name", "Email", "Id", "Organization", "Role", "Status"
+   :widths: 25, 25, 10, 10, 15, 15
+
+   "Aaron Kimball", "kimballa at apache.org", "kimballa", "Zymergen", "PMC Member", "Last active 2011"
+   "Ashish Paliwal", "apaliwal at apache.org", "apaliwal", "Apple", "Committer", "Last active 2017"
+   "Andrew Bayer", "abayer at apache.org", "abayer", "CloudBees", "PMC Member", "Last active 2015"
+   "Brock Noland", "brock at apache.org", "brock", "phData", "PMC Member", "Last active 2019"
+   "Bruce Mitchener", "brucem at apache.org", "brucem", "Data Fueled", "PMC Member", "Last active - project creation"
+   "Derek Deeter", "ddeeter at apache.org", "ddeeter", "Vanderbilt University", "PMC Member", "Last active - project creation"
+   "Eric Sammer", "esammer at apache.org", "esammer", "Splunk", "PMC Member", "Last active 2017"
+   "Hari Shreedharan", "hshreedharan at apache.org", "hshreedharan", "StreamSets", "PMC Member", "Emeritus 2022"
+   "Henry Robinson", "henry at apache.org", "henry", "Cloudera", "PMC Member", "Last active - project creation"
+   "Johny Rufus", "johnyrufus at apache.org", "johnyrufus", "Microsoft", "Committer", "Last active 2017"
+   "Mingjie Lai", "mlai at apache.org", "mlai", "Apple", "PMC Member", "Last active 2012"
+   "Mubarak Seyed", "mubarak at apache.org", "mubarak", "Apple", "Committer", "Last active 2017"
+   "Nick Verbeck", "nerdynick at apache.org", "nerdynick", "", "PMC Member", "Last active 2011"
+   "Patrick Hunt", "phunt at apache.org", "phunt", "Cloudera", "PMC Member", "Last active 2012"
+   "Patrick Wendell", "pwendell at apache.org", "pwendell", "Databricks", "Committer", "Last active 2015"
+   "Prasad Mujumdar", "prasadm at apache.org", "prasadm", "BlueTalon", "PMC Member", "Last active 2015"
+   "Roshan Naik", "roshannaik at apache.org", "roshannaik", "Hortonworks", "PMC Member", "Last active 2017"
+   "Attila Simon", "sati at apache.org", "sati", "RapidMiner", "Committer", "Last active 2017"
+   "Ferenc Szabo", "szaboferee at apache.org", "szaboferee", "Cloudera", "Committer", "Last active 2019"
+   "Wolfgang Hoschek", "whoschek at apache.org", "whoschek", "Cloudera", "Committer", "Last active 2016"
+   "Will McQueen", "will at apache.org", "will", "", "PMC Member", "Last active 2017"