Posted to commits@trafficserver.apache.org by ma...@apache.org on 2017/10/11 07:50:44 UTC

[trafficserver] 11/25: Merge branch 'master' into quic-latest

This is an automated email from the ASF dual-hosted git repository.

maskit pushed a commit to branch quic-05
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit fda42e677644e2bb7fb1714ac6b04fbce49f3566
Author: Leif Hedstrom <zw...@apache.org>
AuthorDate: Wed Oct 4 10:27:50 2017 -0700

    Merge branch 'master' into quic-latest
    
    * master: (149 commits)
      ESI: Make maximum file count runtime configurable.
      Add support for Forwarded HTTP header tag (RFC7239).
      UDPNet: Yet another timing fix for test_UDPnet.
      Support dynamic registration to StatPages.
      Cleanup: fix rollback to use elevating file operations.
      Update s3auth test, removed cpp file and added the Catch main define in the unit test file
      BufferWriter: Add data() method for access to the internal buffer for API consistency with std::string and ts::string_view.
      Ignoring unit tests
      Make sure all files in the sandbox are readable
      Test: Fix timing issues with txn and double tests. Also tweak test_UDPNet to report errors better and use a less common port.
      Preserve Accept-Encoding header for H2 Server Push promise.
      Tests: Clean up file extensions in lib/ts unit tests.
      Update document
      add new TLS milestones
      Doc: Clean up comments in null_transform example plugin.
      Add string literal support to ts::string_view.
      Replace proxy.config.http.normalize_ae_gzip with normalize_ae w/ Brotli support Issue # 2100
      Cleanup: Fix traffic_manager so its checks run if WCCP is enabled.
      Doc: Minor fixups for duplicated milestones and stat typo.
      YTSATS-1464: Support set-redirect with READ_REQUEST_PRE_REMAP_HOOK
      ...
    
    (cherry picked from commit b44224fd97b289a97a1bdb09aa6bed80f2d9a360)
---
 .gitignore                                         |   3 +
 Makefile.am                                        |   4 +-
 ci/jenkins/bin/autest.sh                           |  35 +-
 ci/jenkins/bin/github.sh                           |   2 +-
 ci/jenkins/bin/regression.sh                       |   2 +-
 cmd/traffic_cop/traffic_cop.cc                     |  51 +-
 cmd/traffic_crashlog/traffic_crashlog.cc           |   7 +-
 cmd/traffic_ctl/traffic_ctl.cc                     |   3 +
 cmd/traffic_layout/traffic_layout.cc               |  32 +-
 cmd/traffic_manager/Makefile.am                    |  23 +-
 cmd/traffic_manager/metrics.cc                     |   2 +-
 cmd/traffic_manager/traffic_manager.cc             |  66 +-
 cmd/traffic_top/traffic_top.cc                     |   1 +
 configure.ac                                       |  41 +-
 doc/Makefile.am                                    |   7 +-
 .../transparent-forward-proxying.en.rst            |   3 +
 doc/admin-guide/files/records.config.en.rst        | 104 ++-
 doc/admin-guide/files/ssl_multicert.config.en.rst  |  11 +
 doc/admin-guide/logging/formatting.en.rst          |  27 +-
 .../monitoring/statistics/core/general.en.rst      |   5 +
 doc/admin-guide/plugins/cachekey.en.rst            |  52 ++
 doc/admin-guide/plugins/gzip.en.rst                |   4 +-
 doc/admin-guide/plugins/header_rewrite.en.rst      |   5 +
 doc/admin-guide/plugins/ts_lua.en.rst              |   9 +-
 doc/appendices/command-line/traffic_ctl.en.rst     |   2 +
 doc/conf.py                                        |   5 +
 .../api/functions/TSContSchedule.en.rst            |   2 +-
 .../api/functions/TSHttpArgs.en.rst                |  76 ++
 .../api/functions/TSHttpOverridableConfig.en.rst   |   3 +-
 ...rFieldCreate.en.rst => TSHttpTxnAborted.en.rst} |  20 +-
 .../api/functions/TSHttpTxnMilestoneGet.en.rst     |  16 +-
 .../api/functions/TSMimeHdrFieldAppend.en.rst      |  21 +-
 .../api/functions/TSMimeHdrFieldCreate.en.rst      |  18 +-
 .../api/functions/TSUuidCreate.en.rst              |   5 +-
 .../api/types/TSOverridableConfigKey.en.rst        |   2 +-
 .../cache-architecture/data-structures.en.rst      |   2 +-
 .../plugins/adding-statistics.en.rst               |   4 +-
 .../plugins/example-plugins/index.en.rst           |   6 +-
 .../plugins/http-transformations/index.en.rst      |   2 +-
 doc/ext/traffic-server.py                          |  17 +-
 doc/static/languages.json                          |   4 +-
 example/Makefile.am                                |  38 +-
 example/null_transform/null_transform.c            |  13 +-
 .../server_transform.c}                            |  98 +--
 .../session-1.c => session_hooks/session_hooks.c}  |  50 +-
 example/ssl-preaccept/ats-util.h                   |  64 --
 .../ssl_preaccept.cc}                              |  37 +-
 .../ssl_preaccept.config                           |   0
 example/{ssl-sni/ssl-sni.cc => ssl_sni/ssl_sni.cc} |  42 +-
 example/{ssl-sni => ssl_sni}/ssl_sni.config        |   0
 .../ssl_sni_whitelist.cc}                          |  26 +-
 .../ssl_sni_whitelist.config                       |   0
 example/statistic/statistic.cc                     |  18 +-
 example/{thread-1 => thread_1}/readme.txt          |   2 -
 .../{thread-1/thread-1.c => thread_1/thread_1.c}   |  26 +-
 example/{thread-pool => thread_pool}/README.txt    |   5 +-
 example/{thread-pool => thread_pool}/TESTPLAN.txt  |  12 +-
 .../include/Makefile.am                            |   0
 example/{thread-pool => thread_pool}/include/gen.c |   0
 .../include/gen_inc.sh                             |   0
 example/{thread-pool => thread_pool}/psi.c         |  73 +-
 .../test/SDKTest/SDKtest_server.config             |   0
 .../test/SDKTest/psi_server.c                      |   0
 .../test/SynTest/Tests/Psi/1.cfg                   |   0
 .../test/SynTest/Tests/Psi/10.cfg                  |   0
 .../test/SynTest/Tests/Psi/11.cfg                  |   0
 .../test/SynTest/Tests/Psi/12.cfg                  |   0
 .../test/SynTest/Tests/Psi/13.cfg                  |   0
 .../test/SynTest/Tests/Psi/2.cfg                   |   0
 .../test/SynTest/Tests/Psi/3.cfg                   |   0
 .../test/SynTest/Tests/Psi/4.cfg                   |   0
 .../test/SynTest/Tests/Psi/5.cfg                   |   0
 .../test/SynTest/Tests/Psi/6.cfg                   |   0
 .../test/SynTest/Tests/Psi/7.cfg                   |   0
 .../test/SynTest/Tests/Psi/8.cfg                   |   0
 .../test/SynTest/Tests/Psi/9.cfg                   |   0
 .../test/SynTest/Tests/Psi/psi_files/tc10_file.txt |   0
 .../test/SynTest/Tests/Psi/psi_files/tc11_file.txt |   0
 .../test/SynTest/Tests/Psi/psi_files/tc12_file.txt |   0
 .../test/SynTest/Tests/Psi/psi_files/tc13_file.txt |   0
 .../test/SynTest/Tests/Psi/psi_files/tc1_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc2_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc3_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc4_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc5_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc6_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc7_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc8_file.txt  |   0
 .../test/SynTest/Tests/Psi/psi_files/tc9_file.txt  |   0
 .../test/SynTest/system.cfg                        |   0
 .../test/SynTest/tests_psi.cfg                     |   0
 example/{thread-pool => thread_pool}/thread.c      |   4 +-
 example/{thread-pool => thread_pool}/thread.h      |   2 +
 .../txn_data_sink.c}                               |   4 +-
 example/version/version.c                          |  19 +-
 iocore/cache/Cache.cc                              |  14 +-
 iocore/cache/CacheDir.cc                           |   4 +-
 iocore/cache/CacheRead.cc                          |   4 +-
 iocore/cache/CacheWrite.cc                         |  16 +-
 iocore/cache/P_CacheBC.h                           |   7 +-
 iocore/cache/P_CacheVol.h                          |  15 +-
 iocore/cache/Store.cc                              |  14 +-
 iocore/dns/DNS.cc                                  | 276 ++++--
 iocore/dns/DNSConnection.cc                        |   3 +
 iocore/dns/I_DNSProcessor.h                        |  17 +-
 iocore/dns/P_DNSConnection.h                       |  17 +
 iocore/dns/P_DNSProcessor.h                        |   9 +-
 iocore/dns/SRV.h                                   |   5 +-
 iocore/eventsystem/I_Continuation.h                |   2 +-
 iocore/eventsystem/I_SocketManager.h               |   8 +-
 iocore/eventsystem/P_UnixSocketManager.h           |   2 +-
 iocore/eventsystem/UnixEventProcessor.cc           |   4 +-
 iocore/hostdb/HostDB.cc                            |  46 +-
 iocore/hostdb/I_HostDBProcessor.h                  |   5 +-
 iocore/hostdb/P_HostDBProcessor.h                  |  23 +-
 iocore/net/Makefile.am                             |   2 +-
 iocore/net/NetVCTest.cc                            |   2 +-
 iocore/net/P_InkBulkIO.h                           |   2 +-
 iocore/net/P_SSLConfig.h                           |   2 +-
 iocore/net/P_SSLNetVConnection.h                   |   2 +
 iocore/net/P_UnixNet.h                             | 107 ++-
 iocore/net/P_UnixNetVConnection.h                  |   1 +
 iocore/net/SSLConfig.cc                            |  41 +-
 iocore/net/SSLNetVConnection.cc                    |  59 +-
 iocore/net/SSLUtils.cc                             |  20 +-
 iocore/net/UnixNet.cc                              | 189 ++--
 iocore/net/UnixNetAccept.cc                        |  54 +-
 iocore/net/UnixNetVConnection.cc                   | 123 ++-
 iocore/net/UnixUDPNet.cc                           |   4 +-
 iocore/net/test_I_UDPNet.cc                        |  37 +-
 iocore/utils/I_Machine.h                           |   8 +
 iocore/utils/Machine.cc                            | 125 ++-
 lib/cppapi/Transaction.cc                          |   4 +-
 lib/perl/Makefile.am                               |   5 +-
 lib/records/I_RecCore.h                            |  27 +-
 lib/records/RecCore.cc                             |  38 +-
 lib/records/RecHttp.cc                             |   2 +-
 lib/ts/BufferWriter.h                              | 336 ++++++++
 lib/ts/Diags.cc                                    | 161 ++--
 lib/ts/Diags.h                                     |  36 +-
 lib/ts/HashSip.cc                                  |   2 +-
 lib/ts/I_Layout.h                                  |  62 +-
 lib/ts/IpMap.h                                     |  38 +-
 lib/ts/IpMapTest.cc                                | 283 ------
 lib/ts/Layout.cc                                   | 206 +++--
 lib/ts/Makefile.am                                 |   8 +-
 lib/ts/PriorityQueue.h                             |  18 +
 lib/ts/apidefs.h.in                                |   9 +-
 lib/ts/ink_args.cc                                 |   3 +
 lib/ts/ink_args.h                                  |   4 +
 lib/ts/ink_cap.cc                                  |  11 +
 lib/ts/ink_cap.h                                   |   2 +
 lib/ts/ink_defs.h                                  |  27 -
 lib/ts/ink_inet.cc                                 |  16 +-
 lib/ts/ink_memory.cc                               |   4 +-
 lib/ts/ink_memory.h                                |  48 ++
 lib/ts/ink_platform.h                              |  81 +-
 lib/ts/ink_sock.cc                                 |   5 +
 lib/ts/runroot.cc                                  | 144 ++++
 lib/ts/string_view.h                               |  13 +-
 lib/ts/test_PriorityQueue.cc                       |  93 ++
 lib/ts/unit-tests/test_BufferWriter.cc             | 322 +++++++
 lib/ts/unit-tests/test_IpMap.cc                    | 606 +++++++++++++
 lib/ts/unit-tests/test_layout.cc                   |  89 ++
 .../{string_view.cpp => test_string_view.cc}       |  63 +-
 lib/ts/unit-tests/{main.cpp => unit_test_main.cc}  |   4 +-
 mgmt/Alarms.cc                                     |   2 +-
 mgmt/FileManager.cc                                |   2 +-
 mgmt/LocalManager.cc                               |  24 +-
 mgmt/ProcessManager.cc                             |  17 +-
 mgmt/RecordsConfig.cc                              |  15 +-
 mgmt/Rollback.cc                                   |  11 +-
 mgmt/api/CoreAPIRemote.cc                          |   2 +-
 mgmt/api/NetworkUtilsRemote.cc                     |  15 +-
 mgmt/utils/MgmtSocket.cc                           |  20 +
 mgmt/utils/MgmtSocket.h                            |   6 +
 plugins/esi/README.combo                           |   7 +
 plugins/esi/combo_handler.cc                       |  76 +-
 plugins/esi/lib/HandlerManager.h                   |   2 +-
 plugins/experimental/cachekey/cachekey.cc          |  13 +-
 plugins/experimental/cachekey/cachekey.h           |   5 +-
 plugins/experimental/cachekey/configs.cc           |  40 +-
 plugins/experimental/cachekey/configs.h            |  26 +-
 plugins/experimental/cachekey/pattern.cc           |  24 +-
 plugins/experimental/cachekey/pattern.h            |   5 +-
 plugins/experimental/cachekey/plugin.cc            |  12 +-
 .../header_normalize/header_normalize.cc           |   1 +
 plugins/experimental/metalink/metalink.cc          |  19 +-
 plugins/experimental/ts_lua/ts_lua.c               |  16 +-
 plugins/experimental/ts_lua/ts_lua_common.h        |   2 -
 plugins/experimental/ts_lua/ts_lua_hook.c          |  14 -
 plugins/experimental/ts_lua/ts_lua_http_config.c   |   6 +-
 .../experimental/ts_lua/ts_lua_http_milestone.c    |   8 +-
 plugins/experimental/ts_lua/ts_lua_log.c           |   2 +-
 .../experimental/ts_lua/ts_lua_server_request.c    |   4 +
 .../experimental/ts_lua/ts_lua_server_response.c   |   8 +-
 plugins/experimental/ts_lua/ts_lua_util.c          |   9 -
 plugins/header_rewrite/operators.cc                |  96 ++-
 plugins/header_rewrite/operators.h                 |  11 +
 plugins/s3_auth/Makefile.inc                       |   8 +
 plugins/s3_auth/aws_auth_v4.cc                     |  49 +-
 plugins/s3_auth/aws_auth_v4.h                      | 105 +--
 .../s3_auth/{aws_auth_v4.h => aws_auth_v4_wrap.h}  |  90 +-
 plugins/s3_auth/s3_auth.cc                         |   3 +-
 plugins/s3_auth/unit-tests/test_aws_auth_v4.cc     | 953 +++++++++++++++++++++
 plugins/s3_auth/unit-tests/test_aws_auth_v4.h      | 146 ++++
 plugins/tcpinfo/tcpinfo.cc                         |   2 +-
 proxy/Crash.cc                                     |   2 +-
 proxy/InkAPI.cc                                    |  75 +-
 proxy/InkAPITest.cc                                | 236 ++---
 proxy/InkAPITestTool.cc                            |   4 +-
 proxy/Main.cc                                      |  85 +-
 proxy/ParentSelection.cc                           |  46 +-
 proxy/ParentSelection.h                            |   1 +
 proxy/Plugin.cc                                    |   8 +-
 proxy/PluginVC.cc                                  |  12 +-
 proxy/ProxyClientTransaction.h                     |   5 +
 proxy/StatPages.cc                                 |   3 +
 proxy/StatPages.h                                  |   1 +
 proxy/config/records.config.default.in             |   2 +-
 proxy/hdrs/HTTP.cc                                 |   7 +-
 proxy/hdrs/HTTP.h                                  |   2 +-
 proxy/hdrs/HdrToken.cc                             |  12 +-
 proxy/hdrs/HttpCompat.cc                           |   2 +-
 proxy/hdrs/MIME.cc                                 |   8 +-
 proxy/hdrs/MIME.h                                  |   2 +
 proxy/hdrs/Makefile.am                             |   2 +-
 proxy/http/ForwardedConfig.cc                      | 189 ++++
 proxy/http/Http1ClientSession.cc                   |   2 +-
 proxy/http/Http1ClientSession.h                    |   4 +-
 proxy/http/Http1ClientTransaction.h                |  10 +
 proxy/http/HttpConfig.cc                           |  49 +-
 proxy/http/HttpConfig.h                            |  41 +-
 proxy/http/HttpSM.cc                               |   5 +
 proxy/http/HttpTransact.cc                         |  63 +-
 proxy/http/HttpTransact.h                          |   2 +-
 proxy/http/HttpTransactCache.cc                    |  30 +-
 proxy/http/HttpTransactCache.h                     |   9 +-
 proxy/http/HttpTransactHeaders.cc                  | 315 ++++++-
 proxy/http/HttpTransactHeaders.h                   |   9 +
 proxy/http/HttpTunnel.cc                           |   6 +-
 proxy/http/Makefile.am                             |  17 +-
 proxy/http/remap/RemapConfig.cc                    |   2 +-
 proxy/http/remap/RemapProcessor.cc                 |   3 +-
 proxy/http/unit-tests/sym-links/MemView.cc         |   1 +
 proxy/http/unit-tests/test_ForwardedConfig.cc      | 169 ++++
 .../http/unit-tests/test_ForwardedConfig_mocks.cc  |  86 ++
 proxy/http2/HTTP2.cc                               |   4 +-
 proxy/http2/HTTP2.h                                |   1 +
 proxy/http2/Http2ClientSession.cc                  |   6 +
 proxy/http2/Http2ClientSession.h                   |  23 +
 proxy/http2/Http2ConnectionState.cc                |  51 +-
 proxy/http2/Http2ConnectionState.h                 |   2 +-
 proxy/http2/Http2DependencyTree.h                  | 260 +++---
 proxy/http2/Http2Stream.cc                         |   4 +-
 proxy/http2/Http2Stream.h                          |  18 +-
 proxy/http2/Makefile.am                            |   2 +-
 proxy/http2/test_Http2DependencyTree.cc            | 367 ++++++--
 proxy/logcat.cc                                    |   5 +-
 proxy/logging/Log.cc                               |  26 +-
 proxy/logging/LogAccess.cc                         |  62 +-
 proxy/logging/LogAccess.h                          |  22 +-
 proxy/logging/LogAccessHttp.cc                     | 160 ++--
 proxy/logging/LogAccessHttp.h                      | 183 ++--
 proxy/logging/LogConfig.cc                         |   2 +-
 proxy/logging/LogField.cc                          |   2 +
 proxy/logging/LogStandalone.cc                     |   6 +-
 proxy/logstats.cc                                  |  11 +-
 tests/bootstrap.py                                 |   3 +-
 tests/gold_tests/autest-site/microDNS.test.ext     |  77 ++
 tests/gold_tests/autest-site/ports.py              |   6 +-
 .../gold_tests/body_factory/gold/http-get-200.gold |   4 +-
 .../gold_tests/body_factory/gold/http-get-304.gold |   5 +-
 .../body_factory/gold/http-head-200.gold           |   4 +-
 .../body_factory/http204_response.test.py          |   8 +-
 .../body_factory/http204_response_plugin.test.py   |   6 +-
 .../body_factory/http304_response.test.py          |   2 +-
 tests/gold_tests/continuations/double.test.py      | 124 +++
 tests/gold_tests/h2/http2.test.py                  |   5 +-
 tests/gold_tests/headers/data/www.http408.test.txt |   5 +
 .../headers/data/www.redirect0.test_get.txt        |   2 +
 .../headers/domain-blacklist-30x.test.py           |  95 +-
 tests/gold_tests/headers/forwarded-observer.py     |  63 ++
 tests/gold_tests/headers/forwarded.gold            |  41 +
 tests/gold_tests/headers/forwarded.test.py         | 289 +++++++
 tests/gold_tests/headers/http408.test.py           |  22 +-
 tests/gold_tests/headers/normalize_ae.gold         | 104 +++
 tests/gold_tests/headers/normalize_ae.test.py      | 145 ++++
 .../headers/normalize_ae_observer.py}              |  31 +-
 tests/gold_tests/headers/redirect0_get.gold        |  22 +
 tests/gold_tests/logging/ccid_ctid.test.py         | 107 +++
 .../logging/ccid_ctid_observer.py}                 |  45 +-
 tests/gold_tests/logging/gold/field-test.gold      |   3 +
 tests/gold_tests/logging/log-field.test.py         | 102 +++
 .../null_transform/gold/null_transform-200.gold    |  14 +
 .../null_transform/gold/null_transform-tag.gold    |   1 +
 .../null_transform/null_transform.test.py          |  84 ++
 tests/gold_tests/redirect/gold/redirect.gold       |   5 +
 tests/gold_tests/redirect/redirect.test.py         |  63 ++
 tests/gold_tests/remap/gold/remap-DNS-200.gold     |  14 +
 tests/gold_tests/remap/gold/remap-redirect.gold    |  15 +
 tests/gold_tests/remap/gold/remap-referer-hit.gold |  14 +
 .../gold_tests/remap/gold/remap-referer-miss.gold  |  15 +
 tests/gold_tests/remap/remap_http.test.py          |  51 +-
 tests/gold_tests/tls_hooks/tls_hooks12.test.py     |   6 +-
 tests/gold_tests/transaction/txn.test.py           | 121 +++
 tests/tools/microDNS/README.md                     |  46 +
 tests/tools/microDNS/sample_zonefile.json          |   9 +
 tests/tools/microDNS/uDNS.py                       | 181 ++++
 tests/tools/plugins/continuations_verify.cc        | 178 ++++
 .../tools/plugins}/null_transform.c                |   0
 tests/tools/plugins/ssl_hook_test.cc               |   2 +-
 tests/tools/plugins/ssntxnorder_verify.cc          | 328 +++++++
 tests/tools/tcp_408_client.py                      |  63 --
 tests/tools/tcp_client.py                          |  23 +-
 tests/tools/traffic-replay/Config.py               |   2 +-
 tests/tools/traffic-replay/RandomReplay.py         |  13 +-
 tests/tools/traffic-replay/WorkerTask.py           |   2 +-
 tests/tools/traffic-replay/__main__.py             |  13 +-
 tests/tools/traffic-replay/mainProcess.py          |   2 +-
 .../include => tests/unit_tests}/Makefile.am       |  11 +-
 tools/clang-format.sh                              | 103 ++-
 tools/git/pre-commit                               |   4 +-
 tools/jtest/jtest.cc                               |   4 +-
 324 files changed, 9596 insertions(+), 3007 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9f94fa6..de3eff5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,6 +53,7 @@ cmd/traffic_ctl/traffic_ctl
 cmd/traffic_crashlog/traffic_crashlog
 cmd/traffic_top/traffic_top
 cmd/traffic_manager/traffic_manager
+cmd/traffic_manager/test_metrics
 cmd/traffic_layout/traffic_layout
 cmd/traffic_via/traffic_via
 cmd/traffic_wccp/traffic_wccp
@@ -83,6 +84,7 @@ lib/ts/test_X509HostnameValidator
 lib/ts/test_tsutil
 lib/ts/test_MemView
 lib/ts/test_Scalar
+lib/ts/test_tslib
 lib/perl/lib/Apache/TS.pm
 
 iocore/net/test_certlookup
@@ -119,6 +121,7 @@ proxy/logging/test_LogUtils
 plugins/header_rewrite/header_rewrite_test
 plugins/experimental/esi/*_test
 plugins/experimental/sslheaders/test_sslheaders
+plugins/s3_auth/test_s3auth
 
 plugins/esi/docnode_test
 plugins/esi/gzip_test
diff --git a/Makefile.am b/Makefile.am
index ecdac18..926d921 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -75,12 +75,12 @@ asf-distdir:
 
 asf-dist-sign: asf-dist
 	md5sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.md5
-	sha1sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.sha1
+	sha512sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.sha512
 	gpg --armor --output $(distdir).tar.bz2.asc  --detach-sig $(distdir).tar.bz2
 
 asf-dist-sign-rc: asf-dist-rc
 	md5sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.md5
-	sha1sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.sha1
+	sha512sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.sha512
 	gpg --armor --output $(distdir)-rc$(RC).tar.bz2.asc  --detach-sig $(distdir)-rc$(RC).tar.bz2
 
 release: asf-dist-sign
diff --git a/ci/jenkins/bin/autest.sh b/ci/jenkins/bin/autest.sh
index d3cb205..458f90e 100644
--- a/ci/jenkins/bin/autest.sh
+++ b/ci/jenkins/bin/autest.sh
@@ -16,31 +16,44 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-cd "${WORKSPACE}/src"
+# Setup autoconf
+cd src
 [ -d tests ] || exit 0
 
 autoreconf -if
 
 INSTALL="${WORKSPACE}/${BUILD_NUMBER}/install"
-SANDBOX="/var/tmp/ausb-${ghprbPullId}"
+
+URL="https://ci.trafficserver.apache.org/files/autest"
+AUSB="ausb-${ghprbPullId}.${BUILD_NUMBER}"
+SANDBOX="/var/tmp/${AUSB}"
 
 mkdir -p $INSTALL
 
 ./configure --prefix="$INSTALL" \
             --with-user=jenkins \
             --enable-experimental-plugins \
-            --enable-example-plugins \
             --enable-ccache \
             --enable-debug \
             --enable-werror
 
 # Build and run regressions
-${ATS_MAKE} ${ATS_MAKE_FLAGS} V=1 Q=
-${ATS_MAKE} check VERBOSE=Y && ${ATS_MAKE} install
-
+${ATS_MAKE} ${ATS_MAKE_FLAGS} V=1 Q= || exit -1
+${ATS_MAKE} install
 /usr/bin/autest -D ./tests/gold_tests --sandbox "$SANDBOX" --ats-bin "${INSTALL}/bin"
-status="$?"
-
-[ "0" != "$status" ] && exit -1
-exit 0
-
+status=$?
+
+# Cleanup
+cd /var/tmp # To be safer
+chmod -R a+r ${SANDBOX}
+if [ "0" != "$status" ]; then
+    if [ -d "$SANDBOX" ]; then
+        find "$SANDBOX" -name \*.db  -exec rm {} \;
+        mv "$SANDBOX" /CA/autest
+        echo "Sandbox is available at ${URL}/${AUSB}/"
+    fi
+    exit -1
+else
+    [ -d "$SANDBOX" ] && rmdir "$SANDBOX"
+    exit 0
+fi
diff --git a/ci/jenkins/bin/github.sh b/ci/jenkins/bin/github.sh
index 37b15b4..a038dca 100644
--- a/ci/jenkins/bin/github.sh
+++ b/ci/jenkins/bin/github.sh
@@ -32,7 +32,7 @@ autoreconf -if
 
 # Build and run regressions
 ${ATS_MAKE} ${ATS_MAKE_FLAGS} V=1 Q=
-${ATS_MAKE} check VERBOSE=Y && ${ATS_MAKE} install
+${ATS_MAKE} check VERBOSE=Y V=1 && ${ATS_MAKE} install
 
 ${INSTALL}/bin/traffic_server -K -k -R 1
 [ "0" != "$?" ] && exit -1
diff --git a/ci/jenkins/bin/regression.sh b/ci/jenkins/bin/regression.sh
index d54adc1..c217256 100755
--- a/ci/jenkins/bin/regression.sh
+++ b/ci/jenkins/bin/regression.sh
@@ -19,6 +19,6 @@
 cd "${WORKSPACE}/${BUILD_NUMBER}/build"
 [ -d BUILDS ] && cd BUILDS
 
-${ATS_MAKE} check VERBOSE=Y && ${ATS_MAKE} install
+${ATS_MAKE} check VERBOSE=Y V=1 && ${ATS_MAKE} install
 
 "${WORKSPACE}/${BUILD_NUMBER}/install/bin/traffic_server" -k -K -R 1
diff --git a/cmd/traffic_cop/traffic_cop.cc b/cmd/traffic_cop/traffic_cop.cc
index c4ea158..56737e0 100644
--- a/cmd/traffic_cop/traffic_cop.cc
+++ b/cmd/traffic_cop/traffic_cop.cc
@@ -35,6 +35,7 @@
 #include "RecordsConfig.h"
 #include "ts/ink_cap.h"
 #include "Cop.h"
+#include "ts/runroot.cc"
 
 #include <string>
 #include <map>
@@ -543,7 +544,7 @@ ConfigIntFatalError:
   exit(1);
 }
 
-static char *
+static std::string
 config_read_runtime_dir()
 {
   char state_dir[PATH_NAME_MAX];
@@ -553,11 +554,11 @@ config_read_runtime_dir()
   if (strlen(state_dir) > 0) {
     return Layout::get()->relative(state_dir);
   } else {
-    return ats_strdup(Layout::get()->runtimedir);
+    return Layout::get()->runtimedir;
   }
 }
 
-static char *
+static std::string
 config_read_sysconfig_dir()
 {
   char sysconfig_dir[PATH_NAME_MAX];
@@ -567,11 +568,11 @@ config_read_sysconfig_dir()
   if (strlen(sysconfig_dir) > 0) {
     return Layout::get()->relative(sysconfig_dir);
   } else {
-    return ats_strdup(Layout::get()->sysconfdir);
+    return Layout::get()->sysconfdir;
   }
 }
 
-static char *
+static std::string
 config_read_bin_dir()
 {
   char bindir[PATH_NAME_MAX];
@@ -582,11 +583,11 @@ config_read_bin_dir()
   if (strlen(bindir) > 0) {
     return Layout::get()->relative(bindir);
   } else {
-    return ats_strdup(Layout::get()->bindir);
+    return Layout::get()->bindir;
   }
 }
 
-static char *
+static std::string
 config_read_log_dir()
 {
   char logdir[PATH_NAME_MAX];
@@ -596,7 +597,7 @@ config_read_log_dir()
   if (strlen(logdir) > 0) {
     return Layout::get()->relative(logdir);
   } else {
-    return ats_strdup(Layout::get()->logdir);
+    return Layout::get()->logdir;
   }
 }
 
@@ -608,8 +609,8 @@ config_reload_records()
   char log_filename[PATH_NAME_MAX];
   int tmp_int = 3;
 
-  ats_scoped_str bindir;
-  ats_scoped_str logdir;
+  std::string bindir;
+  std::string logdir;
 
   cop_log_trace("Entering %s()\n", __func__);
   // coverity[fs_check_call]
@@ -637,15 +638,15 @@ config_reload_records()
   get_admin_user();
 
   bindir = config_read_bin_dir();
-  if (access(bindir, R_OK) == -1) {
-    cop_log(COP_FATAL, "could not access() \"%s\"\n", (const char *)bindir);
+  if (access(bindir.c_str(), R_OK) == -1) {
+    cop_log(COP_FATAL, "could not access() \"%s\"\n", bindir.c_str());
     cop_log(COP_FATAL, "please set 'proxy.config.bin_path' \n");
     exit(1);
   }
 
   logdir = config_read_log_dir();
-  if (access(logdir, W_OK) == -1) {
-    cop_log(COP_FATAL, "could not access() \"%s\"\n", (const char *)logdir);
+  if (access(logdir.c_str(), W_OK) == -1) {
+    cop_log(COP_FATAL, "could not access() \"%s\"\n", logdir.c_str());
     cop_log(COP_FATAL, "please set 'proxy.config.log.logfile_dir' \n");
     exit(1);
   }
@@ -741,7 +742,7 @@ static void
 spawn_manager()
 {
   char prog[PATH_NAME_MAX];
-  ats_scoped_str bindir(config_read_bin_dir());
+  std::string bindir(config_read_bin_dir());
 
   cop_log_trace("Entering spawn_manager()\n");
 
@@ -1503,8 +1504,8 @@ check(void *arg)
 
     // We do this after the first round of checks, since the first "check" will spawn traffic_manager
     if (!mgmt_init) {
-      ats_scoped_str runtimedir(config_read_runtime_dir());
-      TSInit(runtimedir, static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS));
+      std::string runtimedir(config_read_runtime_dir());
+      TSInit(runtimedir.c_str(), static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS));
       mgmt_init = true;
 
       // Allow a configurable longer sleep init time
@@ -1627,13 +1628,13 @@ static void
 init_config_file()
 {
   struct stat info;
-  ats_scoped_str config_dir;
+  std::string config_dir;
 
   cop_log_trace("Entering init_config_file()\n");
 
   config_dir = config_read_sysconfig_dir();
-  if (stat(config_dir, &info) < 0) {
-    cop_log(COP_FATAL, "unable to locate config directory '%s'\n", (const char *)config_dir);
+  if (stat(config_dir.c_str(), &info) < 0) {
+    cop_log(COP_FATAL, "unable to locate config directory '%s'\n", config_dir.c_str());
     cop_log(COP_FATAL, " please try setting correct root path in env variable TS_ROOT \n");
     exit(1);
   }
@@ -1642,8 +1643,8 @@ init_config_file()
   if (stat(config_file, &info) < 0) {
     Layout::relative_to(config_file, sizeof(config_file), config_dir, "records.config");
     if (stat(config_file, &info) < 0) {
-      cop_log(COP_FATAL, "unable to locate \"%s/records.config\" or \"%s/records.config.shadow\"\n", (const char *)config_dir,
-              (const char *)config_dir);
+      cop_log(COP_FATAL, "unable to locate \"%s/records.config\" or \"%s/records.config.shadow\"\n", config_dir.c_str(),
+              config_dir.c_str());
       exit(1);
     }
   }
@@ -1667,7 +1668,7 @@ init()
   init_config_file();
   config_reload_records();
 
-  runtime_dir = config_read_runtime_dir();
+  runtime_dir = ats_stringdup(config_read_runtime_dir());
   if (stat(runtime_dir, &info) < 0) {
     cop_log(COP_FATAL, "unable to locate local state directory '%s'\n", runtime_dir);
     cop_log(COP_FATAL, " please try setting correct root path in either env variable TS_ROOT \n");
@@ -1685,7 +1686,8 @@ static const ArgumentDescription argument_descriptions[] = {
   {"stdout", 'o', "Print log messages to standard output", "F", &stdout_flag, nullptr, nullptr},
   {"stop", 's', "Send child processes SIGSTOP instead of SIGKILL", "F", &stop_flag, nullptr, nullptr},
   HELP_ARGUMENT_DESCRIPTION(),
-  VERSION_ARGUMENT_DESCRIPTION()};
+  VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION()};
 
 int
 main(int /* argc */, const char *argv[])
@@ -1693,6 +1695,7 @@ main(int /* argc */, const char *argv[])
   int fd;
   appVersionInfo.setup(PACKAGE_NAME, "traffic_cop", PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, "");
 
+  runroot_handler(argv);
   // Before accessing file system initialize Layout engine
   Layout::create();
 
diff --git a/cmd/traffic_crashlog/traffic_crashlog.cc b/cmd/traffic_crashlog/traffic_crashlog.cc
index faf98dc..c64aded 100644
--- a/cmd/traffic_crashlog/traffic_crashlog.cc
+++ b/cmd/traffic_crashlog/traffic_crashlog.cc
@@ -30,6 +30,7 @@
 #include "I_RecProcess.h"
 #include "RecordsConfig.h"
 #include "ts/BaseLogFile.h"
+#include "ts/runroot.cc"
 
 static int syslog_mode    = false;
 static int debug_mode     = false;
@@ -48,7 +49,8 @@ static const ArgumentDescription argument_descriptions[] = {
   {"syslog", '-', "Syslog after writing a crash log", "F", &syslog_mode, nullptr, nullptr},
   {"debug", '-', "Enable debugging mode", "F", &debug_mode, nullptr, nullptr},
   HELP_ARGUMENT_DESCRIPTION(),
-  VERSION_ARGUMENT_DESCRIPTION()};
+  VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION()};
 
 static struct tm
 timestamp()
@@ -65,7 +67,7 @@ crashlog_name()
 {
   char filename[64];
   struct tm now = timestamp();
-  ats_scoped_str logdir(RecConfigReadLogDir());
+  std::string logdir(RecConfigReadLogDir());
   ats_scoped_str pathname;
 
   strftime(filename, sizeof(filename), "crash-%Y-%m-%d-%H%M%S.log", &now);
@@ -117,6 +119,7 @@ main(int /* argc ATS_UNUSED */, const char **argv)
     ATS_UNUSED_RETURN(seteuid(0));
   }
 
+  runroot_handler(argv);
   Layout::create();
   RecProcessInit(RECM_STAND_ALONE, nullptr /* diags */);
   LibRecordsConfigInit();
diff --git a/cmd/traffic_ctl/traffic_ctl.cc b/cmd/traffic_ctl/traffic_ctl.cc
index 88a0bc0..f09f3b2 100644
--- a/cmd/traffic_ctl/traffic_ctl.cc
+++ b/cmd/traffic_ctl/traffic_ctl.cc
@@ -26,6 +26,7 @@
 #include "ts/I_Layout.h"
 #include "I_RecProcess.h"
 #include "RecordsConfig.h"
+#include "ts/runroot.cc"
 
 AppVersionInfo CtrlVersionInfo;
 
@@ -223,6 +224,7 @@ main(int argc, const char **argv)
     {"debug", '-', "Enable debugging output", "F", &debug, nullptr, nullptr},
     HELP_ARGUMENT_DESCRIPTION(),
     VERSION_ARGUMENT_DESCRIPTION(),
+    RUNROOT_ARGUMENT_DESCRIPTION(),
   };
 
   const subcommand commands[] = {
@@ -252,6 +254,7 @@ main(int argc, const char **argv)
     return CtrlSubcommandUsage(nullptr, commands, countof(commands), argument_descriptions, countof(argument_descriptions));
   }
 
+  runroot_handler(argv);
   Layout::create();
   RecProcessInit(RECM_STAND_ALONE, diags);
   LibRecordsConfigInit();
diff --git a/cmd/traffic_layout/traffic_layout.cc b/cmd/traffic_layout/traffic_layout.cc
index e959c54..b680145 100644
--- a/cmd/traffic_layout/traffic_layout.cc
+++ b/cmd/traffic_layout/traffic_layout.cc
@@ -27,6 +27,7 @@
 #include "ts/I_Layout.h"
 #include "I_RecProcess.h"
 #include "RecordsConfig.h"
+#include "ts/runroot.cc"
 
 // Command line arguments (parsing)
 struct CommandLineArgs {
@@ -43,7 +44,8 @@ const ArgumentDescription argument_descriptions[] = {
   {"json", 'j', "Produce output in JSON format (when supported)", "T", &cl.json, nullptr, nullptr},
 
   HELP_ARGUMENT_DESCRIPTION(),
-  VERSION_ARGUMENT_DESCRIPTION()};
+  VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION()};
 
 // Produce output about compile time features, useful for checking how things were built, as well
 // as for our TSQA test harness.
@@ -129,18 +131,14 @@ produce_features(bool json)
   }
 }
 
-static void
-print_var(const char *name, char *value, bool json, bool free = true, bool last = false)
+void
+print_var(ts::string_view const &name, ts::string_view const &value, bool json, bool last = false)
 {
-  if (json) {
-    printf(R"(    "%s": "%s"%s)", name, value, last ? "\n" : ",\n");
-  } else {
-    printf("%s: %s\n", name, value);
-  }
-
-  if (free) {
-    ats_free(value);
-  }
+  if (json)
+    printf(R"(    "%.*s": "%.*s"%s)", static_cast<int>(name.size()), name.data(), static_cast<int>(value.size()), value.data(),
+           last ? "\n" : ",\n");
+  else
+    printf("%.*s: %.*s\n", static_cast<int>(name.size()), name.data(), static_cast<int>(value.size()), value.data());
 }
 
 static void
@@ -154,14 +152,14 @@ produce_layout(bool json)
   if (json) {
     printf("{\n");
   }
-  print_var("PREFIX", Layout::get()->prefix, json, false); // Don't free this
+  print_var("PREFIX", Layout::get()->prefix, json);
   print_var("BINDIR", RecConfigReadBinDir(), json);
   print_var("SYSCONFDIR", RecConfigReadConfigDir(), json);
-  print_var("LIBDIR", Layout::get()->libdir, json, false); // Don't free this
+  print_var("LIBDIR", Layout::get()->libdir, json);
   print_var("LOGDIR", RecConfigReadLogDir(), json);
   print_var("RUNTIMEDIR", RecConfigReadRuntimeDir(), json);
   print_var("PLUGINDIR", RecConfigReadPluginDir(), json);
-  print_var("INCLUDEDIR", Layout::get()->includedir, json, false); // Dont' free this
+  print_var("INCLUDEDIR", Layout::get()->includedir, json);
   print_var("SNAPSHOTDIR", RecConfigReadSnapshotDir(), json);
 
   print_var("records.config", RecConfigReadConfigPath(nullptr, REC_CONFIG_FILE), json);
@@ -171,7 +169,7 @@ produce_layout(bool json)
   print_var("storage.config", RecConfigReadConfigPath("proxy.config.cache.storage_filename"), json);
   print_var("hosting.config", RecConfigReadConfigPath("proxy.config.cache.hosting_filename"), json);
   print_var("volume.config", RecConfigReadConfigPath("proxy.config.cache.volume_filename"), json);
-  print_var("ip_allow.config", RecConfigReadConfigPath("proxy.config.cache.ip_allow.filename"), json, true, true);
+  print_var("ip_allow.config", RecConfigReadConfigPath("proxy.config.cache.ip_allow.filename"), json, true);
   if (json) {
     printf("}\n");
   }
@@ -187,6 +185,8 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   // Process command line arguments and dump into variables
   process_args(&appVersionInfo, argument_descriptions, countof(argument_descriptions), argv);
 
+  runroot_handler(argv, 0 != cl.json);
+
   if (cl.features) {
     produce_features(0 != cl.json);
   } else {
diff --git a/cmd/traffic_manager/Makefile.am b/cmd/traffic_manager/Makefile.am
index 40342e2..cf64df9 100644
--- a/cmd/traffic_manager/Makefile.am
+++ b/cmd/traffic_manager/Makefile.am
@@ -80,14 +80,6 @@ traffic_manager_LDADD +=\
   @LIBPCRE@ @LIBTCL@ @LIBCAP@ @HWLOC_LIBS@ \
   -lm
 
-# Must do it this way or the dependencies aren't detected.
-if BUILD_WCCP
-traffic_manager_LDADD += \
-  $(top_builddir)/lib/wccp/libwccp.a \
-  $(top_builddir)/lib/tsconfig/libtsconfig.la \
-  @OPENSSL_LIBS@
-endif
-
 test_metrics_SOURCES = test_metrics.cc metrics.cc
 test_metrics_LDADD = \
   $(top_builddir)/mgmt/libmgmt_lm.la \
@@ -98,6 +90,21 @@ test_metrics_LDADD = \
   $(top_builddir)/iocore/eventsystem/libinkevent.a \
   @LIBTCL@ @LIBPCRE@
 
+# Must do it this way or the dependencies aren't detected.
+if BUILD_WCCP
+
+traffic_manager_LDADD += \
+  $(top_builddir)/lib/wccp/libwccp.a \
+  $(top_builddir)/lib/tsconfig/libtsconfig.la \
+  @OPENSSL_LIBS@
+
+test_metrics_LDADD += \
+  $(top_builddir)/lib/wccp/libwccp.a \
+  $(top_builddir)/lib/tsconfig/libtsconfig.la \
+  @OPENSSL_LIBS@
+
+endif
+
 include $(top_srcdir)/build/tidy.mk
 
 tidy-local: $(DIST_SOURCES)
diff --git a/cmd/traffic_manager/metrics.cc b/cmd/traffic_manager/metrics.cc
index eda0ae7..b4d0838 100644
--- a/cmd/traffic_manager/metrics.cc
+++ b/cmd/traffic_manager/metrics.cc
@@ -341,7 +341,7 @@ bool
 metrics_binding_configure(BindingInstance &binding)
 {
   ats_scoped_str sysconfdir(RecConfigReadConfigDir());
-  ats_scoped_str config(Layout::get()->relative_to(sysconfdir, "metrics.config"));
+  ats_scoped_str config(Layout::get()->relative_to(sysconfdir.get(), "metrics.config"));
 
   return binding.require(config.get());
 }
diff --git a/cmd/traffic_manager/traffic_manager.cc b/cmd/traffic_manager/traffic_manager.cc
index 5575fa0..b5e35fa 100644
--- a/cmd/traffic_manager/traffic_manager.cc
+++ b/cmd/traffic_manager/traffic_manager.cc
@@ -27,6 +27,7 @@
 #include "ts/ink_sock.h"
 #include "ts/ink_args.h"
 #include "ts/ink_syslog.h"
+#include "ts/runroot.cc"
 
 #include "WebMgmtUtils.h"
 #include "MgmtUtils.h"
@@ -163,7 +164,7 @@ is_server_idle()
 static void
 check_lockfile()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
+  std::string rundir(RecConfigReadRuntimeDir());
   char lockfile[PATH_NAME_MAX];
   int err;
   pid_t holding_pid;
@@ -299,17 +300,17 @@ initSignalHandlers()
 static void
 init_dirs()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str sysconfdir(RecConfigReadConfigDir());
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string sysconfdir(RecConfigReadConfigDir());
 
-  if (access(sysconfdir, R_OK) == -1) {
-    mgmt_elog(0, "unable to access() config directory '%s': %d, %s\n", (const char *)sysconfdir, errno, strerror(errno));
+  if (access(sysconfdir.c_str(), R_OK) == -1) {
+    mgmt_elog(0, "unable to access() config directory '%s': %d, %s\n", sysconfdir.c_str(), errno, strerror(errno));
     mgmt_elog(0, "please set the 'TS_ROOT' environment variable\n");
     ::exit(1);
   }
 
-  if (access(rundir, R_OK) == -1) {
-    mgmt_elog(0, "unable to access() local state directory '%s': %d, %s\n", (const char *)rundir, errno, strerror(errno));
+  if (access(rundir.c_str(), R_OK) == -1) {
+    mgmt_elog(0, "unable to access() local state directory '%s': %d, %s\n", rundir.c_str(), errno, strerror(errno));
     mgmt_elog(0, "please set 'proxy.config.local_state_dir'\n");
     ::exit(1);
   }
@@ -318,14 +319,14 @@ init_dirs()
 static void
 chdir_root()
 {
-  const char *prefix = Layout::get()->prefix;
+  std::string prefix = Layout::get()->prefix;
 
-  if (chdir(prefix) < 0) {
-    mgmt_elog(0, "unable to change to root directory \"%s\" [%d '%s']\n", prefix, errno, strerror(errno));
+  if (chdir(prefix.c_str()) < 0) {
+    mgmt_elog(0, "unable to change to root directory \"%s\" [%d '%s']\n", prefix.c_str(), errno, strerror(errno));
     mgmt_elog(0, " please set correct path in env variable TS_ROOT \n");
     exit(1);
   } else {
-    mgmt_log("[TrafficManager] using root directory '%s'\n", prefix);
+    mgmt_log("[TrafficManager] using root directory '%s'\n", prefix.c_str());
   }
 }
 
@@ -416,9 +417,11 @@ main(int argc, const char **argv)
 {
   const long MAX_LOGIN = ink_login_name_max();
 
+  runroot_handler(argv);
+
   // Before accessing file system initialize Layout engine
   Layout::create();
-  mgmt_path = Layout::get()->sysconfdir;
+  mgmt_path = Layout::get()->sysconfdir.c_str();
 
   // Set up the application version info
   appVersionInfo.setup(PACKAGE_NAME, "traffic_manager", PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, "");
@@ -454,7 +457,8 @@ main(int argc, const char **argv)
 #endif
     {"nosyslog", '-', "Do not log to syslog", "F", &disable_syslog, nullptr, nullptr},
     HELP_ARGUMENT_DESCRIPTION(),
-    VERSION_ARGUMENT_DESCRIPTION()
+    VERSION_ARGUMENT_DESCRIPTION(),
+    RUNROOT_ARGUMENT_DESCRIPTION()
   };
 
   // Process command line arguments and dump into variables
@@ -488,8 +492,8 @@ main(int argc, const char **argv)
   //  up the manager
   diagsConfig = new DiagsConfig("Manager", DIAGS_LOG_FILENAME, debug_tags, action_tags, false);
   diags       = diagsConfig->diags;
-  diags->set_stdout_output(bind_stdout);
-  diags->set_stderr_output(bind_stderr);
+  diags->set_std_output(StdStream::STDOUT, bind_stdout);
+  diags->set_std_output(StdStream::STDERR, bind_stderr);
 
   RecLocalInit();
   LibRecordsConfigInit();
@@ -534,8 +538,8 @@ main(int argc, const char **argv)
   diagsConfig = new DiagsConfig("Manager", DIAGS_LOG_FILENAME, debug_tags, action_tags, true);
   diags       = diagsConfig->diags;
   RecSetDiags(diags);
-  diags->set_stdout_output(bind_stdout);
-  diags->set_stderr_output(bind_stderr);
+  diags->set_std_output(StdStream::STDOUT, bind_stdout);
+  diags->set_std_output(StdStream::STDERR, bind_stderr);
 
   if (is_debug_tag_set("diags")) {
     diags->dump();
@@ -637,9 +641,9 @@ main(int argc, const char **argv)
   Debug("lm", "Created Web Agent thread (%" PRId64 ")", (int64_t)synthThrId);
 
   // Setup the API and event sockets
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str apisock(Layout::relative_to(rundir, MGMTAPI_MGMT_SOCKET_NAME));
-  ats_scoped_str eventsock(Layout::relative_to(rundir, MGMTAPI_EVENT_SOCKET_NAME));
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string apisock(Layout::relative_to(rundir, MGMTAPI_MGMT_SOCKET_NAME));
+  std::string eventsock(Layout::relative_to(rundir, MGMTAPI_EVENT_SOCKET_NAME));
 
   mode_t oldmask = umask(0);
   mode_t newmode = api_socket_is_restricted() ? 00700 : 00777;
@@ -648,17 +652,16 @@ main(int argc, const char **argv)
   int eventapiFD        = -1; // FD for the api and clients to handle event callbacks
   char mgmtapiFailMsg[] = "Traffic server management API service Interface Failed to Initialize.";
 
-  mgmtapiFD = bind_unix_domain_socket(apisock, newmode);
+  mgmtapiFD = bind_unix_domain_socket(apisock.c_str(), newmode);
   if (mgmtapiFD == -1) {
-    mgmt_log("[WebIntrMain] Unable to set up socket for handling management API calls. API socket path = %s\n",
-             (const char *)apisock);
+    mgmt_log("[WebIntrMain] Unable to set up socket for handling management API calls. API socket path = %s\n", apisock.c_str());
     lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_WEB_ERROR, mgmtapiFailMsg);
   }
 
-  eventapiFD = bind_unix_domain_socket(eventsock, newmode);
+  eventapiFD = bind_unix_domain_socket(eventsock.c_str(), newmode);
   if (eventapiFD == -1) {
     mgmt_log("[WebIntrMain] Unable to set up so for handling management API event calls. Event Socket path: %s\n",
-             (const char *)eventsock);
+             eventsock.c_str());
   }
 
   umask(oldmask);
@@ -678,7 +681,9 @@ main(int argc, const char **argv)
   metrics_binding_initialize(*binding);
   metrics_binding_configure(*binding);
 
-  int sleep_time = 0; // sleep_time given in sec
+  const int MAX_SLEEP_S      = 60; // Max sleep duration
+  int sleep_time             = 0;  // sleep_time given in sec
+  uint64_t last_start_epoc_s = 0;  // latest start attempt in seconds since epoc
 
   for (;;) {
     lmgmt->processEventQueue();
@@ -737,16 +742,17 @@ main(int argc, const char **argv)
     }
 
     if (lmgmt->run_proxy && !lmgmt->processRunning() && lmgmt->proxy_recoverable) { /* Make sure we still have a proxy up */
-      if (sleep_time) {
+      const uint64_t now = static_cast<uint64_t>(time(nullptr));
+      if (sleep_time && ((now - last_start_epoc_s) < MAX_SLEEP_S)) {
         mgmt_log("Relaunching proxy after %d sec...", sleep_time);
         millisleep(1000 * sleep_time); // we use millisleep instead of sleep because it doesnt interfere with signals
-        sleep_time = (sleep_time > 30) ? 60 : sleep_time * 2;
+        sleep_time = std::min(sleep_time * 2, MAX_SLEEP_S);
       } else {
         sleep_time = 1;
       }
       if (ProxyStateSet(TS_PROXY_ON, TS_CACHE_CLEAR_NONE) == TS_ERR_OKAY) {
-        just_started = 0;
-        sleep_time   = 0;
+        just_started      = 0;
+        last_start_epoc_s = static_cast<uint64_t>(time(nullptr));
       } else {
         just_started++;
       }
diff --git a/cmd/traffic_top/traffic_top.cc b/cmd/traffic_top/traffic_top.cc
index 982983a..99996ff 100644
--- a/cmd/traffic_top/traffic_top.cc
+++ b/cmd/traffic_top/traffic_top.cc
@@ -404,6 +404,7 @@ main(int argc, const char **argv)
     {"sleep", 's', "Enable debugging output", "I", &sleep_time, nullptr, nullptr},
     HELP_ARGUMENT_DESCRIPTION(),
     VERSION_ARGUMENT_DESCRIPTION(),
+    RUNROOT_ARGUMENT_DESCRIPTION(),
   };
 
   process_args(&version, argument_descriptions, countof(argument_descriptions), argv, USAGE);
diff --git a/configure.ac b/configure.ac
index 02a5de5..05e8ec7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -498,13 +498,13 @@ PKG_CHECK_MODULES([LIBMAGICKCPP],[Magick++], [
 AM_CONDITIONAL([BUILD_WEBP_TRANSFORM_PLUGIN], [test "x${enable_webp_transform_plugin}" = "xyes"])
 
 #
-# Example plugins. The example plugins are always built, but not always installed. Installing
+# Example plugins. The example plugins are only built and installed if this is enabled. Installing
 # them is useful for QA, but not useful for most users, so we default this to disabled.
 #
 
 AC_MSG_CHECKING([whether to install example plugins])
 AC_ARG_ENABLE([example-plugins],
-  [AS_HELP_STRING([--enable-example-plugins],[install example plugins])],
+  [AS_HELP_STRING([--enable-example-plugins],[build and install example plugins])],
   [],
   [enable_example_plugins=no]
 )
@@ -1287,13 +1287,36 @@ else
 fi
 AC_SUBST(has_backtrace)
 
-# Remote process unwinding is only implemented on Linux because it depends on various Linux-specific
-# features such as /proc filesystem nodes, ptrace(2) and waitpid(2) extensions.
-AS_IF([test "$host_os_def" = "linux"], [
-  PKG_CHECK_MODULES([LIBUNWIND], [libunwind-ptrace], [
-    enable_remote_unwinding=yes
+#
+# use unwind library when possible (can be disabled)
+#
+AC_MSG_CHECKING([whether to use unwind library])
+AC_ARG_ENABLE([unwind],
+  AS_HELP_STRING([--disable-unwind],[Don't use the unwind library]), [
   ], [
-    dnl no remote unwind support
+    enable_unwind="yes"
+    enable_unwind_default="yes"
+])
+AC_MSG_RESULT([$enable_unwind])
+
+AS_IF([test "x$enable_unwind" = "xyes"], [
+  # Remote process unwinding is only implemented on Linux because it depends on various Linux-specific
+  # features such as /proc filesystem nodes, ptrace(2) and waitpid(2) extensions.
+  AS_IF([test "$host_os_def" = "linux"], [
+    PKG_CHECK_MODULES([LIBUNWIND], [libunwind-ptrace], [
+      enable_remote_unwinding=yes
+    ], [
+      AS_IF([test "x$enable_unwind_default" = "xyes"], [
+        AC_MSG_WARN([unwind not found, try disabling it --disable-unwind])
+      ], [
+        AC_MSG_ERROR([unwind not found, try disabling it --disable-unwind])
+      ])
+    ])], [
+    AS_IF([test "x$enable_unwind_default" = "xyes"], [
+      AC_MSG_WARN([unwind only available on linux, try disabling it --disable-unwind])
+    ], [
+      AC_MSG_ERROR([unwind only available on linux, try disabling it --disable-unwind])
+    ])
   ])
 ])
 TS_ARG_ENABLE_VAR([use], [remote_unwinding])
@@ -1587,7 +1610,6 @@ AC_CHECK_HEADERS([sys/types.h \
                   ctype.h \
                   siginfo.h \
                   malloc.h \
-                  wait.h \
                   float.h \
                   libgen.h \
                   values.h \
@@ -1999,6 +2021,7 @@ AC_CONFIG_FILES([
   tools/Makefile
   tools/trafficserver.pc
   tools/tsxs
+  tests/unit_tests/Makefile
 ])
 
 # -----------------------------------------------------------------------------
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 557d8a4..f45921d 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -46,8 +46,11 @@ PAPER         = letter
 BUILDDIR      = docbuild
 
 # Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
+# [amc] LaTex apparently doesn't work as of Sphinx 1.6.1
+# see https://media.readthedocs.org/pdf/sphinx/1.6.3/sphinx.pdf
+# section 24.3.2 around page 247, third item for 'NotImplementedError', so this is kind of useless.
+PAPEROPT_a4     = -t latex_a4
+PAPEROPT_letter = -t latex_letter
 ALLSPHINXOPTS   = $(SPHINXOPTS)
 # the i18n builder cannot share the environment and doctrees with the others
 I18NSPHINXOPTS  = $(SPHINXOPTS)
diff --git a/doc/admin-guide/configuration/transparent-forward-proxying.en.rst b/doc/admin-guide/configuration/transparent-forward-proxying.en.rst
index cee8ba6..d573c28 100644
--- a/doc/admin-guide/configuration/transparent-forward-proxying.en.rst
+++ b/doc/admin-guide/configuration/transparent-forward-proxying.en.rst
@@ -88,6 +88,9 @@ You may also want to consider some of these configuration options:
 - The client request header X-Forwarded-For may be toggled with
   :ts:cv:`proxy.config.http.insert_squid_x_forwarded_for`.
 
+- The client request header Forwarded may be configured with
+  :ts:cv:`proxy.config.http.insert_forwarded`.
+
 Client Configuration
 ====================
 
diff --git a/doc/admin-guide/files/records.config.en.rst b/doc/admin-guide/files/records.config.en.rst
index 939d17d..cf1b921 100644
--- a/doc/admin-guide/files/records.config.en.rst
+++ b/doc/admin-guide/files/records.config.en.rst
@@ -22,7 +22,7 @@
 records.config
 **************
 
-The :file:`records.config` file (by default, located in
+The :file:`records.config` file (by default (:ts:cv:`proxy.config.config_dir`), located in
 ``/usr/local/etc/trafficserver/``) is a list of configurable variables used by
 the |TS| software. Many of the variables in :file:`records.config` are set
 automatically when you set configuration options with :option:`traffic_ctl config set`. After you
@@ -226,7 +226,8 @@ System Variables
    This is a read-only configuration option that contains the
    ``SYSCONFDIR`` value specified at build time relative to the
    installation prefix. The ``$TS_ROOT`` environment variable can
-   be used alter the installation prefix at run time.
+   be used alter the installation prefix at run time. The directory must
+   allow read/write access for configuration reloads.
 
 .. ts:cv:: CONFIG proxy.config.syslog_facility STRING LOG_DAEMON
 
@@ -1257,7 +1258,9 @@ Parent Proxy Configuration
 
    The total number of connection attempts for a specific transaction allowed to
    a parent cache before Traffic Server bypasses the parent or fails the request
-   (depending on the ``go_direct`` option in the :file:`parent.config` file).
+   (depending on the ``go_direct`` option in the :file:`parent.config` file). The
+   number of parents tried is
+   ``proxy.config.http.parent_proxy.fail_threshold / proxy.config.http.parent_proxy.total_connect_attempts``
 
 .. ts:cv:: CONFIG proxy.config.http.parent_proxy.per_parent_connect_attempts INT 2
    :reloadable:
@@ -1663,17 +1666,54 @@ Proxy User Variables
 
    When enabled (``1``), Traffic Server adds the client IP address to the ``X-Forwarded-For`` header.
 
-.. ts:cv:: CONFIG proxy.config.http.normalize_ae_gzip INT 1
+.. ts:cv:: CONFIG proxy.config.http.insert_forwarded STRING none
    :reloadable:
    :overridable:
 
-   Enable (``1``) to normalize all ``Accept-Encoding:`` headers to one of the following:
+   The default value (``none``) means that Traffic Server does not insert or append information to any
+   ``Forwarded`` header (described in IETF RFC 7239) in the request message.  To put information into a
+   ``Forwarded`` header in the request, the value of this variable must be a list of the ``Forwarded``
+   parameters to be inserted.
 
-   -  ``Accept-Encoding: gzip`` (if the header has ``gzip`` or ``x-gzip`` with any ``q``) **OR**
-   -  *blank* (for any header that does not include ``gzip``)
+   ==================  ===============================================================
+   Parameter           Value of parameter placed in outgoing Forwarded header
+   ==================  ===============================================================
+   for                 Client IP address
+   by=ip               Proxy IP address
+   by=unknown          The literal string ``unknown``
+   by=servername       Proxy server name
+   by=uuid             Server UUID prefixed with ``_``
+   proto               Protocol of incoming request
+   host                The host specified in the incoming request
+   connection=compact  Connection with basic transaction codes.
+   connection=std      Connection with detailed transaction codes.
+   connection=full     Full user agent connection :ref:`protocol tags <protocol_tags>`
+   ==================  ===============================================================
 
-   This is useful for minimizing cached alternates of documents (e.g. ``gzip, deflate`` vs. ``deflate, gzip``). Enabling this option is
-   recommended if your origin servers use no encodings other than ``gzip``.
+   Each parameter in the list must be separated by ``|`` or ``:``.  For example, ``for|by=uuid|proto`` is
+   a valid value for this variable.  Note that the ``connection`` parameter is a non-standard extension to
+   RFC 7239.  Also note that, while Traffic Server allows multiple ``by`` parameters for the same proxy, this
+   is prohibited by RFC 7239. Currently, for the ``host`` parameter to provide the original host from the
+   incoming client request, `proxy.config.url_remap.pristine_host_hdr`_ must be enabled.
+
+.. ts:cv:: CONFIG proxy.config.http.normalize_ae INT 1
+   :reloadable:
+   :overridable:
+
+   Specifies normalization, if any, of ``Accept-Encoding:`` headers.
+
+   ===== ======================================================================
+   Value Description
+   ===== ======================================================================
+   ``0`` No normalization.
+   ``1`` ``Accept-Encoding: gzip`` (if the header has ``gzip`` or ``x-gzip`` with any ``q``) **OR**
+         *blank* (for any header that does not include ``gzip``)
+   ``2`` ``Accept-Encoding: br`` if the header has ``br`` (with any ``q``) **ELSE**
+         normalize as for value ``1``
+   ===== ======================================================================
+
+   This is useful for minimizing cached alternates of documents (e.g. ``gzip, deflate`` vs. ``deflate, gzip``).
+   Enabling this option is recommended if your origin servers use no encodings other than ``gzip`` or ``br`` (Brotli).
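+
+   For example, with a value of ``1``, normalization would behave roughly as follows
+   (an illustrative sketch)::
+
+      Accept-Encoding: deflate, gzip;q=0.8   ->   Accept-Encoding: gzip
+      Accept-Encoding: deflate               ->   (blank)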
 
 Security
 ========
@@ -2422,6 +2462,20 @@ DNS
    in DNS Injection attacks), particularly in forward or transparent proxies, but
    requires that the resolver populates the queries section of the response properly.
 
+.. ts:cv:: CONFIG proxy.config.dns.connection_mode INT 0
+
+   Sets the connection mode between |TS| and the nameservers. Three modes are
+   available: UDP_ONLY, TCP_RETRY, and TCP_ONLY.
+
+   ===== ======================================================================
+   Value Description
+   ===== ======================================================================
+   ``0`` UDP_ONLY:  |TS| always talks to nameservers over UDP.
+   ``1`` TCP_RETRY: |TS| tries UDP first, retrying over TCP if the response is truncated.
+   ``2`` TCP_ONLY:  |TS| always talks to nameservers over TCP.
+   ===== ======================================================================
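+
+   For example, to force all nameserver traffic over TCP (shown only as an illustration)::
+
+      CONFIG proxy.config.dns.connection_mode INT 2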
+
 HostDB
 ======
 
@@ -2458,6 +2512,10 @@ HostDB
    For values above ``200000``, you must increase :ts:cv:`proxy.config.hostdb.max_size`
    by at least 44 bytes per entry.
 
+.. ts:cv:: CONFIG proxy.config.hostdb.round_robin_max_count INT 16
+
+   The maximum number of DNS answers kept per round robin HostDB record. The default value is 16.
+
 .. ts:cv:: CONFIG proxy.config.hostdb.ttl_mode INT 0
    :reloadable:
 
@@ -3133,7 +3191,8 @@ SSL Termination
 
    The filename of the default and global ticket key for SSL sessions. The location is relative to the
    :ts:cv:`proxy.config.ssl.server.cert.path` directory. One way to generate this would be to run
-   ``head -c48 /dev/urandom | openssl enc -base64 | head -c48 > file.ticket``.
+   ``head -c48 /dev/urandom | openssl enc -base64 | head -c48 > file.ticket``. Also
+   note that OpenSSL session tickets are sensitive to the version of the ca-certificates.
 
 .. ts:cv:: CONFIG proxy.config.ssl.max_record_size INT 0
 
@@ -3401,6 +3460,31 @@ HTTP/2 Configuration
 
    Enable the experimental HTTP/2 Stream Priority feature.
 
+.. ts:cv:: CONFIG proxy.config.http2.accept_no_activity_timeout INT 120
+   :reloadable:
+   :overridable:
+
+   Specifies how long Traffic Server keeps connections to clients open
+   if the transaction stalls. Lowering this timeout can ease pressure on the
+   proxy if misconfigured or misbehaving clients are opening a large number of
+   connections without submitting requests.
+
+.. ts:cv:: CONFIG proxy.config.http2.no_activity_timeout_in INT 120
+   :reloadable:
+   :overridable:
+
+   Specifies how long Traffic Server keeps connections to clients open if a
+   transaction stalls. Lowering this timeout can ease pressure on the proxy if
+   misconfigured or misbehaving clients are opening a large number of
+   connections without submitting requests.
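+
+   For example, to lower both HTTP/2 inactivity timeouts to 60 seconds (a hypothetical
+   tuning value, not a recommendation)::
+
+      CONFIG proxy.config.http2.accept_no_activity_timeout INT 60
+      CONFIG proxy.config.http2.no_activity_timeout_in INT 60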
+
+.. ts:cv:: CONFIG proxy.config.http2.push_diary_size INT 256
+   :reloadable:
+
+   Indicates the maximum number of HTTP/2 server pushes that are remembered per
+   HTTP/2 connection to avoid duplicate pushes on the same connection. If the
+   maximum number is reached, new entries are not remembered.
+
 Plug-in Configuration
 =====================
 
diff --git a/doc/admin-guide/files/ssl_multicert.config.en.rst b/doc/admin-guide/files/ssl_multicert.config.en.rst
index e8112a7..b4a5ecc 100644
--- a/doc/admin-guide/files/ssl_multicert.config.en.rst
+++ b/doc/admin-guide/files/ssl_multicert.config.en.rst
@@ -119,6 +119,17 @@ ssl_key_dialog=builtin|"exec:/path/to/program [args]" (optional)
       program runs a security check to ensure that the system is not
       compromised by an attacker before providing the pass phrase.
 
+exit_on_load_error=1|0 (optional)
+  If a certificate configuration does not result in a working
+  configuration, Traffic Server is not allowed to start when this is
+  set. If a new configuration is broken, the existing working
+  configuration will not be replaced.
+
+  Exit on load error is enabled by default.
+
+  This option cannot be applied to a running Traffic Server using
+  :option:`traffic_ctl config reload`.
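+
+  For example, to let Traffic Server start even if this (hypothetical) certificate fails
+  to load, an entry could be::
+
+    dest_ip=* ssl_cert_name=example.com.pem exit_on_load_error=0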
+
 Certificate Selection
 =====================
 
diff --git a/doc/admin-guide/logging/formatting.en.rst b/doc/admin-guide/logging/formatting.en.rst
index 4e4cc0b..bbb3150 100644
--- a/doc/admin-guide/logging/formatting.en.rst
+++ b/doc/admin-guide/logging/formatting.en.rst
@@ -179,19 +179,28 @@ Connections and Transactions
 
 .. _sca:
 .. _sstc:
+.. _ccid:
+.. _ctid:
 
 The following log fields are used to list various details of connections and
 transactions between |TS| proxies and origin servers.
 
-===== ====== ==================================================================
-Field Source Description
-===== ====== ==================================================================
-sca   Proxy  Number of attempts within the current transaction by |TS|
-             in connecting to the origin server.
-sstc  Proxy  Number of transactions between the |TS| proxy and the origin
-             server from a single session. Any value greater than zero
-             indicates connection reuse.
-===== ====== ==================================================================
+===== ============== ==================================================================
+Field Source         Description
+===== ============== ==================================================================
+sca   Proxy          Number of attempts within the current transaction by |TS|
+                     in connecting to the origin server.
+sstc  Proxy          Number of transactions between the |TS| proxy and the origin
+                     server from a single session. Any value greater than zero
+                     indicates connection reuse.
+ccid  Client Request Client Connection ID, a non-negative number for a connection,
+                     which is different for all currently-active connections to
+                     clients.
+ctid  Client Request Client Transaction ID, a non-negative number for a transaction,
+                     which is different for all currently-active transactions on the
+                     same client connection.  For client HTTP/2 transactions, this
+                     value is the stream ID for the transaction.
+===== ============== ==================================================================
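+
+For example, a custom log format string (shown only as an illustration; ``cqu`` is the
+client request URI field) could combine these identifiers with other fields::
+
+   %<ccid> %<ctid> %<cqu>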
 
 .. _admin-logging-fields-content-type:
 
diff --git a/doc/admin-guide/monitoring/statistics/core/general.en.rst b/doc/admin-guide/monitoring/statistics/core/general.en.rst
index eb08eda..df77cd0 100644
--- a/doc/admin-guide/monitoring/statistics/core/general.en.rst
+++ b/doc/admin-guide/monitoring/statistics/core/general.en.rst
@@ -117,3 +117,8 @@ General
    A shortened string containing the release number of the running instance of
    |TS|.
 
+.. ts:stat:: global proxy.process.traffic_server.memory.rss integer
+   :unit: bytes
+
+   The resident set size (RSS) of the ``traffic_server`` process. This is
+   approximately the amount of physical memory the process is consuming.
diff --git a/doc/admin-guide/plugins/cachekey.en.rst b/doc/admin-guide/plugins/cachekey.en.rst
index 75e134d..39f7c55 100644
--- a/doc/admin-guide/plugins/cachekey.en.rst
+++ b/doc/admin-guide/plugins/cachekey.en.rst
@@ -74,6 +74,8 @@ Cache key structure and related plugin parameters
 * ``--capture-prefix=<capture_definition>`` (default: empty string) - if specified and not empty then strings are captured from ``host:port`` based on the ``<capture_definition>`` and are added to the cache key.
 * ``--capture-prefix-uri=<capture_definition>`` (default: empty string) - if specified and not empty then strings are captured from the entire URI based on the ``<capture_definition>`` and are added to the cache key.
 * If any of the "Prefix" related plugin parameters are used together in the plugin configuration they are added to the cache key in the order shown in the diagram.
+* ``--remove-prefix=<true|false|yes|no|0|1>`` (default: false) - if specified, the prefix elements (host, port) are neither processed nor appended to the cache key. All prefix related plugin parameters are ignored if this parameter is ``true``, ``yes`` or ``1``.
+
 
 
 "User-Agent" section
@@ -139,6 +141,7 @@ Cache key structure and related plugin parameters
 * if no path related plugin parameters are used, the URI path string is included in the cache key.
 * ``--capture-path=<capture_definition>`` (default: empty string) - if specified and not empty then strings are captured from URI path based on the ``<capture_definition>`` and are added to the cache key.
 * ``--capture-path-uri=<capture_definition>`` (default: empty string) - if specified and not empty then strings are captured from the entire URI based on the ``<capture_definition>`` and are added to the cache key.
+* ``--remove-path=<true|false|yes|no|0|1>`` (default: false) - if specified, the HTTP URI path element is neither processed nor appended to the cache key. All path related plugin parameters are ignored if this parameter is ``true``, ``yes`` or ``1``.
 
 "Query" section
 ^^^^^^^^^^^^^^^
@@ -162,6 +165,11 @@ All parameters are optional, and if not used, their default values are as mentio
     * ``/<regex>/<replacement>/`` - ``<regex>`` defines regex capturing groups, ``<replacement>`` defines a pattern where the captured strings referenced with ``$0`` ... ``$9`` will be substituted and the result will be added to the cache key.
 
 
+Cache key elements separator
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+* ``--separator=<string>`` - the cache key is constructed by extracting elements from the HTTP URI and headers, or by using the UA classifiers, and appending them during key construction separated by ``/`` (by default). This option allows the default separator to be overridden with any string (including an empty string).
+
+
 Detailed examples and troubleshooting
 =====================================
 
@@ -504,3 +512,47 @@ and if ``tool_agents.config`` contains: ::
   ^curl.*
 
 then ``browser`` will be used when constructing the key.
+
+
+Cacheurl plugin to cachekey plugin migration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The `cachekey` plugin is not meant to replace the `cacheurl` plugin by generating exactly the same cache key strings. It simply allows the operator to extract elements from the HTTP URI in the same way `cacheurl` does (through a regular expression, see `<capture_definition>` above).
+
+The following examples demonstrate different ways to achieve `cacheurl` compatibility at the cache key string level in order to avoid invalidating the cache.
+
+The operator could use `--capture-path-uri`, `--capture-path`, `--capture-prefix-uri`, `--capture-prefix` to capture elements from the URI, path, and authority parts.
+
+By using `--separator=<string>` the operator can override the default separator with an empty string (`--separator=`) and thus make sure there are no cache key element separators.
+
+
+Example 1: Let us say we have a capture definition used with `cacheurl`. With `--capture-prefix-uri` one can extract elements through that same capture definition, remove the cache key element separator with `--separator=`, remove the URI path with `--capture-path-uri`, and remove the query string with `--remove-all-params=true`::
+
+  @plugin=cachekey.so \
+      @pparam=--capture-prefix-uri=/.*/$0/ \
+      @pparam=--capture-path-uri=/.*// \
+      @pparam=--remove-all-params=true \
+      @pparam=--separator=
+
+Example 2: A more efficient way is to use `--capture-prefix-uri` to capture from the URI, `--separator=` to remove the cache key element separator, `--remove-path` to remove the URI path, and `--remove-all-params=true` to remove the query string::
+
+  @plugin=cachekey.so \
+      @pparam=--capture-prefix-uri=/.*/$0/ \
+      @pparam=--remove-path=true \
+      @pparam=--remove-all-params=true \
+      @pparam=--separator=
+
+Example 3: The same result as above, but this time using `--capture-path-uri` to capture from the URI, `--separator=` to remove the cache key element separator, `--remove-prefix` to remove the URI authority elements, and `--remove-all-params=true` to remove the query string::
+
+    @plugin=cachekey.so \
+        @pparam=--capture-path-uri=/(.*)/$0/ \
+        @pparam=--remove-prefix=true \
+        @pparam=--remove-all-params=true \
+        @pparam=--separator=
+
+Example 4: Let us say we would like to capture from the URI in a way similar to `cacheurl`, but also sort the query parameters (which `cacheurl` does not support). We can achieve that by using `--capture-prefix-uri` with a capture definition that processes the URI before the `?`, `--remove-path` to remove the URI path, and `--sort-params=true` to sort the query parameters::
+
+    @plugin=cachekey.so \
+        @pparam=--capture-prefix-uri=/([^?]*)/$1/ \
+        @pparam=--remove-path=true \
+        @pparam=--sort-params=true \
+        @pparam=--separator=
diff --git a/doc/admin-guide/plugins/gzip.en.rst b/doc/admin-guide/plugins/gzip.en.rst
index b413a00..59b03dd 100644
--- a/doc/admin-guide/plugins/gzip.en.rst
+++ b/doc/admin-guide/plugins/gzip.en.rst
@@ -158,8 +158,8 @@ supported-algorithms
 Provides the compression algorithms that are supported. This will allow |TS| to selectively
 support gzip, deflate, and brotli compression. The default is gzip. Multiple algorthims can be selected using ',' delimiter,
 for instance, ``supported-algorithms deflate,gzip,br``. Note that if
-ts:cv:`proxy.config.http.normalize_ae_gzip` is enabled (``1``), only gzip will be
-considered.
+:ts:cv:`proxy.config.http.normalize_ae` is ``1``, only gzip will be
+considered, and if it is ``2``, only br or gzip will be considered.
 
 Examples
 ========
diff --git a/doc/admin-guide/plugins/header_rewrite.en.rst b/doc/admin-guide/plugins/header_rewrite.en.rst
index 7eb9684..2789e46 100644
--- a/doc/admin-guide/plugins/header_rewrite.en.rst
+++ b/doc/admin-guide/plugins/header_rewrite.en.rst
@@ -1174,3 +1174,8 @@ Add the HTTP Strict Transport Security (HSTS) header if it does not exist and th
     cond %{HEADER:Strict-Transport-Security} ="" [AND]
     cond %{INBOUND:TLS} /./
     set-header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"
+
+This is mostly used by attaching it to a remap rule that maps to a host known to support TLS. If
+the parallel `OUTBOUND` support is added then this could be done by checking for both inbound and
+outbound TLS in the `SEND_REQUEST_HDR_HOOK`. However, this technique may also be used for a non-TLS
+upstream if the goal is simply to require the user agent to connect to |TS| over TLS.
diff --git a/doc/admin-guide/plugins/ts_lua.en.rst b/doc/admin-guide/plugins/ts_lua.en.rst
index caf715e..331a9a7 100644
--- a/doc/admin-guide/plugins/ts_lua.en.rst
+++ b/doc/admin-guide/plugins/ts_lua.en.rst
@@ -107,7 +107,6 @@ each lua script:
 - **'do_global_send_response'**
 - **'do_global_cache_lookup_complete'**
 - **'do_global_read_cache'**
-- **'do_global_select_alt'**
 
 We can write this in plugin.config:
 
@@ -391,7 +390,6 @@ Hook point constants
     TS_LUA_HOOK_OS_DNS
     TS_LUA_HOOK_PRE_REMAP
     TS_LUA_HOOK_READ_CACHE_HDR
-    TS_LUA_HOOK_SELECT_ALT
     TS_LUA_HOOK_TXN_CLOSE
     TS_LUA_HOOK_POST_REMAP
     TS_LUA_HOOK_CACHE_LOOKUP_COMPLETE
@@ -424,9 +422,6 @@ Additional Information:
 | TS_HTTP_POST          | TS_LUA_HOOK               |     YES              |    NO              |    YES               |
 | _REMAP_HOOK           | _POST_REMAP               |                      |                    |                      |
 +-----------------------+---------------------------+----------------------+--------------------+----------------------+
-| TS_HTTP_SELECT        | TS_LUA_HOOK               |     NO               |    NO              |    NO                |
-| _ALT_HOOK             | _SELECT_ALT               |                      |                    |                      |
-+-----------------------+---------------------------+----------------------+--------------------+----------------------+
 | TS_HTTP_READ          | TS_LUA_HOOK               |     YES              |    NO              |    YES               |
 | _CACHE_HDR_HOOK       | _READ_CACHE_HDR           |                      |                    |                      |
 +-----------------------+---------------------------+----------------------+--------------------+----------------------+
@@ -3108,7 +3103,6 @@ Http config constants
     TS_LUA_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK
     TS_LUA_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER_MARK
     TS_LUA_CONFIG_HTTP_CACHE_RANGE_LOOKUP
-    TS_LUA_CONFIG_HTTP_NORMALIZE_AE_GZIP
     TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_SIZE
     TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_WATER_MARK
     TS_LUA_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE
@@ -3129,6 +3123,7 @@ Http config constants
     TS_LUA_CONFIG_HTTP_CACHE_OPEN_WRITE_FAIL_ACTION
     TS_LUA_CONFIG_HTTP_NUMBER_OF_REDIRECTIONS
     TS_LUA_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES
+    TS_LUA_CONFIG_HTTP_NORMALIZE_AE
     TS_LUA_CONFIG_LAST_ENTRY
 
 `TOP <#ts-lua-plugin>`_
@@ -3374,6 +3369,8 @@ Milestone constants
     TS_LUA_MILESTONE_SM_FINISH
     TS_LUA_MILESTONE_PLUGIN_ACTIVE
     TS_LUA_MILESTONE_PLUGIN_TOTAL
+    TS_LUA_MILESTONE_TLS_HANDSHAKE_START
+    TS_LUA_MILESTONE_TLS_HANDSHAKE_END
 
 
 `TOP <#ts-lua-plugin>`_
diff --git a/doc/appendices/command-line/traffic_ctl.en.rst b/doc/appendices/command-line/traffic_ctl.en.rst
index 026621f..9edfbf2 100644
--- a/doc/appendices/command-line/traffic_ctl.en.rst
+++ b/doc/appendices/command-line/traffic_ctl.en.rst
@@ -51,6 +51,8 @@ of subcommands that control different aspects of Traffic Server:
 :program:`traffic_ctl plugin`
     Interact with plugins.
 
+To use :program:`traffic_ctl`, :ref:`traffic_manager` needs to be running.
+
 Options
 =======
 
diff --git a/doc/conf.py b/doc/conf.py
index 5260a2d..f152ac7 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -335,6 +335,11 @@ latex_elements = {
     #'preamble': '',
 }
 
+if tags.has('latex_a4'):
+    latex_elements['papersize'] = 'a4paper'
+elif tags.has('latex_paper'):
+    latex_elements['papersize'] = 'letterpaper'
+
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
diff --git a/doc/developer-guide/api/functions/TSContSchedule.en.rst b/doc/developer-guide/api/functions/TSContSchedule.en.rst
index 51e312b..27820cd 100644
--- a/doc/developer-guide/api/functions/TSContSchedule.en.rst
+++ b/doc/developer-guide/api/functions/TSContSchedule.en.rst
@@ -31,7 +31,7 @@ Synopsis
 Description
 ===========
 
-Schedules :arg:`contp` to run :arg:`delay` nanoseconds in the future. This is approximate. The delay
+Schedules :arg:`contp` to run :arg:`delay` milliseconds in the future. This is approximate. The delay
 will be at least :arg:`delay` but possibly more. Resultions finer than roughly 5 milliseconds will
 not be effective. :arg:`contp` is required to have a mutex, which is provided to
 :func:`TSContCreate`.
diff --git a/doc/developer-guide/api/functions/TSHttpArgs.en.rst b/doc/developer-guide/api/functions/TSHttpArgs.en.rst
new file mode 100644
index 0000000..11e770d
--- /dev/null
+++ b/doc/developer-guide/api/functions/TSHttpArgs.en.rst
@@ -0,0 +1,76 @@
+.. Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed
+   with this work for additional information regarding copyright
+   ownership.  The ASF licenses this file to you under the Apache
+   License, Version 2.0 (the "License"); you may not use this file
+   except in compliance with the License.  You may obtain a copy of
+   the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+   implied.  See the License for the specific language governing
+   permissions and limitations under the License.
+
+.. include:: ../../../common.defs
+.. default-domain:: c
+
+TSHttpArgs
+************
+
+Synopsis
+========
+
+`#include <ts/ts.h>`
+
+.. function:: TSReturnCode TSHttpArgIndexReserve(const char * name, const char * description, int * arg_idx)
+.. function:: TSReturnCode TSHttpArgIndexNameLookup(const char * name, int * arg_idx, const char ** description)
+.. function:: TSReturnCode TSHttpArgIndexLookup(int arg_idx, const char ** name, const char ** description)
+.. function:: void TSHttpTxnArgSet(TSHttpTxn txnp, int arg_idx, void * arg)
+.. function:: void * TSHttpTxnArgGet(TSHttpTxn txnp, int arg_idx)
+.. function:: void TSHttpSsnArgSet(TSHttpSsn ssnp, int arg_idx, void * arg)
+.. function:: void * TSHttpSsnArgGet(TSHttpSsn ssnp, int arg_idx)
+
+Description
+===========
+
+|TS| sessions and transactions provide a fixed array of void pointers that can be used by plugins to
+store information. This can be used to avoid creating a per-session or per-transaction continuation to
+hold data, or to communicate between plugins, as the values in the array are visible to any plugin
+which can access the session or transaction. The array values are opaque to |TS| and it will not
+dereference or release them. Plugins are responsible for cleaning up any resources pointed to by the
+values; if the values are simply values, there is no need for the plugin to remove them after the
+session or transaction has completed.
+
+To avoid collisions between plugins a plugin should first *reserve* an index in the array by calling
+:func:`TSHttpArgIndexReserve` passing it an identifying name, a description, and a pointer to an
+integer which will get the reserved index. The function returns :code:`TS_SUCCESS` if an index was
+reserved, :code:`TS_ERROR` if not (most likely because all of the indices have already been
+reserved). Generally this will be a file or library scope global which is set at plugin
+initialization. This function is used in the example remap plugin :ts:git:`example/remap/remap.cc`.
+The index is stored in the global :code:`arg_index`. When an index is reserved it is reserved for
+both sessions and transactions.
+
+To look up the owner of a reserved index use :func:`TSHttpArgIndexNameLookup`. If the :arg:`name` is
+found as an owner, the function returns :code:`TS_SUCCESS` and :arg:`arg_index` is updated with the
+index reserved under that name. If :arg:`description` is not :code:`NULL` then it will be updated
+with the description for that reserved index. This enables communication between plugins where
+plugin "A" reserves an index under a well known name and plugin "B" locates the index by looking it
+up under that name.
+
+The owner of a reserved index can be found with :func:`TSHttpArgIndexLookup`. If :arg:`arg_index` is
+reserved then the function returns :code:`TS_SUCCESS` and :arg:`name` and :arg:`description` are
+updated. :arg:`name` must point at a valid character pointer but :arg:`description` can be
+:code:`NULL`.
+
+Manipulating the array is simple. :func:`TSHttpTxnArgSet` sets the array slot at :arg:`arg_idx` for
+the transaction :arg:`txnp` to the value :arg:`arg`. Note this sets the value only for the specific
+transaction. Similarly :func:`TSHttpSsnArgSet` sets the value for a session argument. The values can
+be retrieved with :func:`TSHttpTxnArgGet` for transactions and :func:`TSHttpSsnArgGet` for sessions,
+which return the specified value. Values that have not been set are :code:`NULL`.
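+
+As a minimal sketch (the plugin name, description, and stored value here are purely
+illustrative), a plugin might reserve an index at initialization and then use it from a
+transaction hook::
+
+   static int arg_index;
+
+   void
+   TSPluginInit(int argc, const char *argv[])
+   {
+     /* Reserve one slot in the argument array for this plugin. */
+     if (TSHttpArgIndexReserve("example_plugin", "Example per-transaction data", &arg_index) != TS_SUCCESS) {
+       TSError("[example_plugin] Failed to reserve an argument index");
+     }
+   }
+
+   /* Later, inside a transaction hook handler: */
+   /*   TSHttpTxnArgSet(txnp, arg_index, data);          -- store a pointer  */
+   /*   void *data = TSHttpTxnArgGet(txnp, arg_index);   -- retrieve it      */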
+
+.. note:: Session arguments persist for the entire session, which means potentially across all transactions in that session.
+
+.. note:: Following arg index reservations is conventional; it is not enforced.
diff --git a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
index b4907db..e1340ad 100644
--- a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
+++ b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst
@@ -123,6 +123,7 @@ c:member:`TS_CONFIG_HTTP_INSERT_AGE_IN_RESPONSE`                    :ts:cv:`prox
 c:member:`TS_CONFIG_HTTP_INSERT_REQUEST_VIA_STR`                    :ts:cv:`proxy.config.http.insert_request_via_str`
 c:member:`TS_CONFIG_HTTP_INSERT_RESPONSE_VIA_STR`                   :ts:cv:`proxy.config.http.insert_response_via_str`
 c:member:`TS_CONFIG_HTTP_INSERT_SQUID_X_FORWARDED_FOR`              :ts:cv:`proxy.config.http.insert_squid_x_forwarded_for`
+c:member:`TS_CONFIG_HTTP_INSERT_FORWARDED`                          :ts:cv:`proxy.config.http.insert_forwarded`
 c:member:`TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_IN`                     :ts:cv:`proxy.config.http.keep_alive_enabled_in`
 c:member:`TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_OUT`                    :ts:cv:`proxy.config.http.keep_alive_enabled_out`
 c:member:`TS_CONFIG_HTTP_KEEP_ALIVE_NO_ACTIVITY_TlMEOUT_IN`         :ts:cv:`proxy.config.http.keep_alive_no_activity_timeout_in`
@@ -132,7 +133,6 @@ c:member:`TS_CONFIG_HTTP_NEGATIVE_CACHING_ENABLED`                  :ts:cv:`prox
 c:member:`TS_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME`                 :ts:cv:`proxy.config.http.negative_caching_lifetime`
 c:member:`TS_CONFIG_HTTP_NEGATIVE_REVALIDATING_ENABLED`             :ts:cv:`proxy.config.http.negative_revalidating_enabled`
 c:member:`TS_CONFIG_HTTP_NEGATIVE_REVALIDATING_LIFETIME`            :ts:cv:`proxy.config.http.negative_revalidating_lifetime`
-c:member:`TS_CONFIG_HTTP_NORMALIZE_AE_GZIP`                         :ts:cv:`proxy.config.http.normalize_ae_gzip`
 c:member:`TS_CONFIG_HTTP_NUMBER_OF_REDIRECTIONS`                    :ts:cv:`proxy.config.http.number_of_redirections`
 c:member:`TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS`                    :ts:cv:`proxy.config.http.origin_max_connections`
 c:member:`TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE`              :ts:cv:`proxy.config.http.origin_max_connections_queue`
@@ -171,6 +171,7 @@ c:member:`TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD`               :ts:cv:`prox
 c:member:`TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME`                   :ts:cv:`proxy.config.http.parent_proxy.retry_time`
 c:member:`TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS`               :ts:cv:`proxy.config.http.parent_proxy.per_parent_connect_attempts`
 c:member:`TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT`            :ts:cv:`proxy.config.http.parent_proxy.connect_attempts_timeout`
+c:member:`TS_CONFIG_HTTP_NORMALIZE_AE`                              :ts:cv:`proxy.config.http.normalize_ae`
 ==================================================================  ====================================================================
 
 Examples
diff --git a/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst b/doc/developer-guide/api/functions/TSHttpTxnAborted.en.rst
similarity index 65%
copy from doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst
copy to doc/developer-guide/api/functions/TSHttpTxnAborted.en.rst
index de5c74a..d0638c5 100644
--- a/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst
+++ b/doc/developer-guide/api/functions/TSHttpTxnAborted.en.rst
@@ -18,15 +18,25 @@
 
 .. default-domain:: c
 
-TSMimeHdrFieldCreate
-********************
+TSHttpTxnAborted
+================
 
 Synopsis
-========
+--------
 
 `#include <ts/ts.h>`
 
-.. function:: TSReturnCode TSMimeHdrFieldCreate(TSMBuffer bufp, TSMLoc hdr, TSMLoc * locp)
+.. c:function:: TSReturnCode TSHttpTxnAborted(TSHttpTxn txnp)
 
 Description
-===========
+-----------
+
+:c:func:`TSHttpTxnAborted` returns :c:data:`TS_SUCCESS` if the requested
+transaction is aborted. This function should be used to determine whether
+a transaction has been aborted before attempting to cache the results.
+
+Return values
+-------------
+
+The API returns :c:data:`TS_SUCCESS` if the requested transaction is aborted, and
+:c:data:`TS_ERROR` otherwise.
\ No newline at end of file
diff --git a/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst
index e0cd0ac..08fb5d3 100644
--- a/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst
+++ b/doc/developer-guide/api/functions/TSHttpTxnMilestoneGet.en.rst
@@ -52,14 +52,6 @@ is successful.
 
 		The client connection is accepted.
 
-	.. macro:: TS_MILESTONE_PLUGIN_ACTIVE
-
-		Amount of time plugins were active plus start time.
-
-	.. macro:: TS_MILESTONE_PLUGIN_TOTAL
-
-		Wall time while plugins were active plus start time.
-
 	.. macro:: TS_MILESTONE_UA_READ_HEADER_DONE
 
 		The request header from the client has been read and parsed.
@@ -136,6 +128,14 @@ is successful.
 
 		Amount of time spent in or waiting for plugins.
 
+   .. macro:: TS_MILESTONE_TLS_HANDSHAKE_START
+
+      Timestamp when the server starts the TLS handshake. 0 if no handshake is performed (connection reuse).
+
+   .. macro:: TS_MILESTONE_TLS_HANDSHAKE_END
+
+      Timestamp when the server completes the TLS handshake. 0 if no handshake is performed (connection reuse).
+
 	.. macro:: TS_MILESTONE_LAST_ENTRY
 
 		A psuedo index which is set to be one more than the last valid index. This is useful for looping over the data.
diff --git a/doc/developer-guide/api/functions/TSMimeHdrFieldAppend.en.rst b/doc/developer-guide/api/functions/TSMimeHdrFieldAppend.en.rst
index 24038da..7690fa8 100644
--- a/doc/developer-guide/api/functions/TSMimeHdrFieldAppend.en.rst
+++ b/doc/developer-guide/api/functions/TSMimeHdrFieldAppend.en.rst
@@ -31,17 +31,10 @@ Synopsis
 Description
 ===========
 
-Returns the :c:type:`TSMLoc` location of a specified MIME field from
-within the MIME header located at :arg:`hdr`.
-
-The retrieved_str parameter specifies which field to retrieve.  For
-each MIME field in the MIME header, a pointer comparison is done
-between the field name and retrieved_str.  This is a much quicker
-retrieval function than :c:func:`TSMimeHdrFieldFind` since it obviates
-the need for a string comparison.  However, retrieved_str must be one
-of the predefined field names of the form ``TS_MIME_FIELD_XXX``
-for the call to succeed.  Release the returned :c:type:`TSMLoc` handle
-with a call to :c:func:`TSHandleMLocRelease`.
-
-.. XXX The above is surely from the documentation of another function. Confirm
-       and remove from here (or relocate to the appropriate function's doc).
+Attaches a MIME :arg:`field` to a header. The header is represented by the :arg:`bufp` and :arg:`hdr`
+arguments which should have been obtained by a call to :func:`TSHttpTxnClientReqGet` or similar. If
+the field in :arg:`field` was created by calling :func:`TSMimeHdrFieldCreateNamed` the same
+:arg:`bufp` and :arg:`hdr` passed to that should be passed to this function.
+
+Returns :code:`TS_SUCCESS` if the :arg:`field` was attached to the header, :code:`TS_ERROR` if it
+was not. Fields cannot be attached to read only headers.
diff --git a/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst b/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst
index de5c74a..8373a07 100644
--- a/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst
+++ b/doc/developer-guide/api/functions/TSMimeHdrFieldCreate.en.rst
@@ -26,7 +26,23 @@ Synopsis
 
 `#include <ts/ts.h>`
 
-.. function:: TSReturnCode TSMimeHdrFieldCreate(TSMBuffer bufp, TSMLoc hdr, TSMLoc * locp)
+.. function:: TSReturnCode TSMimeHdrFieldCreate(TSMBuffer bufp, TSMLoc hdr, TSMLoc * out)
+.. function:: TSReturnCode TSMimeHdrFieldCreateNamed(TSMBuffer bufp, TSMLoc hdr, const char * name, int name_len, TSMLoc * out)
 
 Description
 ===========
+
+These functions create MIME fields in a MIME header. The header is specified by the combination of
+the buffer :arg:`bufp` and a location :arg:`hdr`. The header must be either created such as by
+:func:`TSMimeHdrCreate` or be an existing header found via a function such as :func:`TSHttpTxnClientReqGet`.
+
+:func:`TSMimeHdrFieldCreate` creates a completely empty field which must be named before being used
+in a header, usually via :func:`TSMimeHdrFieldNameSet`. It is almost always more convenient to use
+:func:`TSMimeHdrFieldCreateNamed` which combines these two steps, creating the field and then
+setting the name to :arg:`name`.
+
+For both functions a reference to the new field is returned via :arg:`out`.
+
+The field created is not in a header even though it is in the same buffer. It can be added to a
+header with :func:`TSMimeHdrFieldAppend`. The field also has no value, only a name. If a value is
+needed it must be added explicitly with a function such as :func:`TSMimeHdrFieldValueIntSet`.
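+
+As a minimal sketch (the field name and value are purely illustrative), assuming :arg:`bufp`
+and :arg:`hdr_loc` were obtained from a call such as :func:`TSHttpTxnClientReqGet`::
+
+   TSMLoc field;
+
+   if (TSMimeHdrFieldCreateNamed(bufp, hdr_loc, "X-Example", -1, &field) == TS_SUCCESS) {
+     /* Give the new field a value, then attach it to the header. */
+     TSMimeHdrFieldValueIntSet(bufp, hdr_loc, field, -1, 42);
+     TSMimeHdrFieldAppend(bufp, hdr_loc, field);
+     TSHandleMLocRelease(bufp, hdr_loc, field);
+   }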
diff --git a/doc/developer-guide/api/functions/TSUuidCreate.en.rst b/doc/developer-guide/api/functions/TSUuidCreate.en.rst
index 2239184..9f91650 100644
--- a/doc/developer-guide/api/functions/TSUuidCreate.en.rst
+++ b/doc/developer-guide/api/functions/TSUuidCreate.en.rst
@@ -88,8 +88,9 @@ object, but it does not need to be previously initialized.
 
 Finally, :func:`TSClientRequestUuidGet` can be used to extract
 the client request uuid from a transaction. The output buffer must be of
-sufficient length, minimum of ``TS_CRUUID_STRING_LEN``. This produces the same
-string as the log tag %<cruuid> generates.
+sufficient length, minimum of ``TS_CRUUID_STRING_LEN`` + 1 bytes. This
+produces the same string as the log tag %<cruuid> generates, and it will
+be NULL terminated.
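+
+As a minimal sketch (the debug tag is illustrative)::
+
+   char uuid_str[TS_CRUUID_STRING_LEN + 1];
+
+   if (TSClientRequestUuidGet(txnp, uuid_str) == TS_SUCCESS) {
+     TSDebug("example", "client request uuid: %s", uuid_str);
+   }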
 
 Return Values
 =============
diff --git a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
index 3cd4a89..94c94d2 100644
--- a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
+++ b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst
@@ -99,7 +99,6 @@ Enumeration Members
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NEGATIVE_REVALIDATING_ENABLED
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NEGATIVE_REVALIDATING_LIFETIME
-.. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NORMALIZE_AE_GZIP
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NUMBER_OF_REDIRECTIONS
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE
@@ -131,6 +130,7 @@ Enumeration Members
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_URL_REMAP_PRISTINE_HOST_HDR
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT
 .. c:member:: TSOverridableConfigKey  TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT
+.. c:member:: TSOverridableConfigKey  TS_CONFIG_HTTP_NORMALIZE_AE
 
 Description
 ===========
diff --git a/doc/developer-guide/cache-architecture/data-structures.en.rst b/doc/developer-guide/cache-architecture/data-structures.en.rst
index 5256f06..3efff1f 100644
--- a/doc/developer-guide/cache-architecture/data-structures.en.rst
+++ b/doc/developer-guide/cache-architecture/data-structures.en.rst
@@ -178,7 +178,7 @@ Data Structures
       fragment containing the first byte in the range can be computed and loaded
       directly without further disk access.
 
-      Removed as of version 3.3.0.
+      Removed as of version 3.3.0. [#fragment-offset-table]_
 
    .. cpp:member:: uint32_t Doc::sync_serial
 
diff --git a/doc/developer-guide/plugins/adding-statistics.en.rst b/doc/developer-guide/plugins/adding-statistics.en.rst
index 8a7964c..aae3283 100644
--- a/doc/developer-guide/plugins/adding-statistics.en.rst
+++ b/doc/developer-guide/plugins/adding-statistics.en.rst
@@ -37,8 +37,8 @@ with :c:func:`TSStatIntSet`, and increment it with :c:func:`TSStatIntIncrement`
 :c:func:`TSStatIntDecrement`.
 
 .. literalinclude:: ../../../example/statistic/statistic.cc
-   :language: c
-   :lines: 30-
+   :language: cpp
+   :lines: 32-
 
 If this plugin is loaded, then the statistic can be accessed with ::
 
diff --git a/doc/developer-guide/plugins/example-plugins/index.en.rst b/doc/developer-guide/plugins/example-plugins/index.en.rst
index 9ab3492..5141eee 100644
--- a/doc/developer-guide/plugins/example-plugins/index.en.rst
+++ b/doc/developer-guide/plugins/example-plugins/index.en.rst
@@ -47,7 +47,11 @@ understand the following topics:
 -  Working with HTTP header functions
 
 The two sample plugins discussed in this chapter are ``blacklist_1.c``
-and ``basic_auth.c``.
+and ``basic_auth.c``. To build and install the example plugins use ::
+
+   ./configure --enable-example-plugins
+
+when :ref:`setting the build configuration <admin-configuration-options>` for |TS|.
 
 Overview
 --------
diff --git a/doc/developer-guide/plugins/http-transformations/index.en.rst b/doc/developer-guide/plugins/http-transformations/index.en.rst
index cc35380..7e747d2 100644
--- a/doc/developer-guide/plugins/http-transformations/index.en.rst
+++ b/doc/developer-guide/plugins/http-transformations/index.en.rst
@@ -181,5 +181,5 @@ and will keep the transaction and the origin server connection up. This is usefu
 run to completion even if the user agent disconnects. Examples would be a standard transform that is expensive to initiate, or expensive
 origin server connections that should be :ts:cv:`shared <proxy.config.http.server_session_sharing.match>`.
 
-There is an `example plugin <https://github.com/apache/trafficserver/blob/master/example/txn-data-sink/txn-data-sink.c>`_ that demonstrates
+There is an `example plugin <https://github.com/apache/trafficserver/blob/master/example/txn_data_sink/txn_data_sink.c>`_ that demonstrates
 this used as a pure data sink to keep the transaction up regardless of whether the user agent disconnects.
diff --git a/doc/ext/traffic-server.py b/doc/ext/traffic-server.py
index b5ef358..acc6ec1 100644
--- a/doc/ext/traffic-server.py
+++ b/doc/ext/traffic-server.py
@@ -38,14 +38,16 @@ import subprocess
 import re
 
 # 2/3 compat logic
-try :
+try:
     basestring
-    def is_string_type(s) :
+
+    def is_string_type(s):
         return isinstance(s, basestring)
-except NameError :
-    def is_string_type(s) :
+except NameError:
+    def is_string_type(s):
         return isinstance(s, str)
 
+
 class TSConfVar(std.Target):
     """
     Description of a traffic server configuration variable.
@@ -201,7 +203,7 @@ class TSStat(std.Target):
         field = nodes.field()
         field.append(nodes.field_name(text=tag))
         body = nodes.field_body()
-        if is_string_type(value) :
+        if is_string_type(value):
             body.append(sphinx.addnodes.compact_paragraph(text=value))
         else:
             body.append(value)
@@ -356,14 +358,15 @@ class TrafficServerDomain(Domain):
 
     # Python 2/3 compat - iteritems is 2, items is 3
     # Although perhaps the lists are small enough items could be used in Python 2.
-    try :
+    try:
         {}.iteritems()
+
         def get_objects(self):
             for var, doc in self.data['cv'].iteritems():
                 yield var, var, 'cv', doc, var, 1
             for var, doc in self.data['stat'].iteritems():
                 yield var, var, 'stat', doc, var, 1
-    except AttributeError :
+    except AttributeError:
         def get_objects(self):
             for var, doc in self.data['cv'].items():
                 yield var, var, 'cv', doc, var, 1
diff --git a/doc/static/languages.json b/doc/static/languages.json
index 753617e..9fef826 100644
--- a/doc/static/languages.json
+++ b/doc/static/languages.json
@@ -1,4 +1,4 @@
 {
-  "en": { "name": "English", "versions": ["latest","7.0.x","6.2.x","5.3.x"] },
-  "ja": { "name": "日本語", "versions": ["latest","7.0.x","6.2.x","5.3.x"] }
+  "en": { "name": "English", "versions": ["latest","7.1.x","6.2.x","5.3.x"] },
+  "ja": { "name": "日本語", "versions": ["latest","7.1.x","6.2.x","5.3.x"] }
 }
diff --git a/example/Makefile.am b/example/Makefile.am
index 8cd7a49..27621e3 100644
--- a/example/Makefile.am
+++ b/example/Makefile.am
@@ -23,6 +23,8 @@ AM_LDFLAGS = $(TS_PLUGIN_LD_FLAGS)
 libatscppapi = $(top_builddir)/lib/cppapi/libatscppapi.la
 libtsconfig = $(top_builddir)/lib/tsconfig/libtsconfig.la
 
+if BUILD_EXAMPLE_PLUGINS
+
 example_Plugins = \
 	add_header.la \
 	append_transform.la \
@@ -49,13 +51,14 @@ example_Plugins = \
 	response_header_1.la \
 	secure_link.la \
 	server_push.la \
-	server-transform.la \
-	ssl-preaccept.la \
-	ssl-sni-whitelist.la \
-	ssl-sni.la \
+	server_transform.la \
+	session_hooks.la \
+	ssl_preaccept.la \
+	ssl_sni_whitelist.la \
+	ssl_sni.la \
 	statistic.la \
-	thread-1.la \
-	txn-data-sink.la \
+	thread_1.la \
+	txn_data_sink.la \
 	version.la \
 	disable_http2.la
 
@@ -85,10 +88,8 @@ example_Plugins += \
 	cppapi/boom.la \
 	cppapi/intercept.la
 
-if BUILD_EXAMPLE_PLUGINS
 pkglib_LTLIBRARIES = $(example_Plugins)
-else
-noinst_LTLIBRARIES = $(example_Plugins)
+
 endif
 
 add_header_la_SOURCES = add_header/add_header.c
@@ -107,7 +108,7 @@ output_header_la_SOURCES = output_header/output_header.c
 passthru_la_SOURCES = passthru/passthru.cc
 protocol_la_SOURCES = protocol/Protocol.c protocol/TxnSM.c
 protocol_stack_la_SOURCES = protocol_stack/protocol_stack.cc
-psi_la_SOURCES = thread-pool/psi.c thread-pool/thread.c
+psi_la_SOURCES = thread_pool/psi.c thread_pool/thread.c
 query_remap_la_SOURCES = query_remap/query_remap.c
 remap_header_add_la_SOURCES = remap_header_add/remap_header_add.cc
 remap_la_SOURCES = remap/remap.cc
@@ -115,22 +116,19 @@ replace_header_la_SOURCES = replace_header/replace_header.c
 response_header_1_la_SOURCES = response_header_1/response_header_1.c
 secure_link_la_SOURCES = secure_link/secure_link.c
 server_push_la_SOURCES = server_push/server_push.c
-server_transform_la_SOURCES = server-transform/server-transform.c
-ssl_preaccept_la_SOURCES = ssl-preaccept/ssl-preaccept.cc
-ssl_sni_la_SOURCES = ssl-sni/ssl-sni.cc
+server_transform_la_SOURCES = server_transform/server_transform.c
+ssl_preaccept_la_SOURCES = ssl_preaccept/ssl_preaccept.cc
+ssl_sni_la_SOURCES = ssl_sni/ssl_sni.cc
 ssl_sni_la_LIBADD = $(libtsconfig)
-ssl_sni_whitelist_la_SOURCES = ssl-sni-whitelist/ssl-sni-whitelist.cc
+ssl_sni_whitelist_la_SOURCES = ssl_sni_whitelist/ssl_sni_whitelist.cc
 ssl_sni_whitelist_la_LIBADD = $(libtsconfig)
 disable_http2_la_SOURCES = disable_http2/disable_http2.cc
 statistic_la_SOURCES = statistic/statistic.cc
-thread_1_la_SOURCES = thread-1/thread-1.c
-txn_data_sink_la_SOURCES = txn-data-sink/txn-data-sink.c
+thread_1_la_SOURCES = thread_1/thread_1.c
+txn_data_sink_la_SOURCES = txn_data_sink/txn_data_sink.c
 version_la_SOURCES = version/version.c
 redirect_1_la_SOURCES = redirect_1/redirect_1.c
-
-# The following examples do not build:
-#
-# session_1_la_SOURCES = session-1/session-1.c
+session_hooks_la_SOURCES = session_hooks/session_hooks.c
 
 cppapi_AsyncHttpFetchStreaming_la_SOURCES = cppapi/async_http_fetch_streaming/AsyncHttpFetchStreaming.cc
 cppapi_AsyncHttpFetch_la_SOURCES = cppapi/async_http_fetch/AsyncHttpFetch.cc
diff --git a/example/null_transform/null_transform.c b/example/null_transform/null_transform.c
index cc941cd..1de21e0 100644
--- a/example/null_transform/null_transform.c
+++ b/example/null_transform/null_transform.c
@@ -1,6 +1,6 @@
 /** @file
 
-  A brief file description
+  An example program that does a null transform of response body content.
 
   @section license License
 
@@ -21,17 +21,6 @@
   limitations under the License.
  */
 
-/* null_transform.c:  an example program that does a null transform
- *                    of response body content
- *
- *
- *
- *	Usage:
- *	  null_transform.so
- *
- *
- */
-
 #include <stdio.h>
 #include <unistd.h>
 
diff --git a/example/server-transform/server-transform.c b/example/server_transform/server_transform.c
similarity index 85%
rename from example/server-transform/server-transform.c
rename to example/server_transform/server_transform.c
index ac91787..0c93be1 100644
--- a/example/server-transform/server-transform.c
+++ b/example/server_transform/server_transform.c
@@ -1,6 +1,26 @@
 /** @file
 
-  A brief file description
+  @brief An example program that sends response content to a server to be transformed and sends the
+         transformed content to the client.
+
+  The protocol spoken with the server is simple. The plugin sends the
+  content-length of the document being transformed as a 4-byte
+  integer and then it sends the document itself. The first 4-bytes of
+  the server response are a status code/content length. If the code
+  is greater than 0 then the plugin assumes transformation was
+  successful and uses the code as the content length of the
+  transformed document. If the status code is less than or equal to 0
+  then the plugin bypasses transformation and sends the original
+  document on through.
+
+  The plugin does a fair amount of error checking and tries to bypass
+  transformation in many cases such as when it can't connect to the
+  server. This example plugin simply connects to port 7 on localhost,
+  which on our solaris machines (and most unix machines) is the echo
+  port. One nicety about the protocol is that simply having the
+  server echo back what it is sent results in a "null"
+  transformation. (i.e. A transformation which does not modify the
+  content).
 
   @section license License
 
@@ -19,37 +39,7 @@
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
- */
-
-/* server-transform.c:  an example program that sends response content
- *                      to a server to be transformed, and sends the
- *                      transformed content to the client
- *
- *
- *	Usage:
- *	  server-transform.so
- *
- *
- */
-
-/* The protocol spoken with the server is simple. The plugin sends the
-   content-length of the document being transformed as a 4-byte
-   integer and then it sends the document itself. The first 4-bytes of
-   the server response are a status code/content length. If the code
-   is greater than 0 then the plugin assumes transformation was
-   successful and uses the code as the content length of the
-   transformed document. If the status code is less than or equal to 0
-   then the plugin bypasses transformation and sends the original
-   document on through.
-
-   The plugin does a fair amount of error checking and tries to bypass
-   transformation in many cases such as when it can't connect to the
-   server. This example plugin simply connects to port 7 on localhost,
-   which on our solaris machines (and most unix machines) is the echo
-   port. One nicety about the protocol is that simply having the
-   server echo back what it is sent results in a "null"
-   transformation. (i.e. A transformation which does not modify the
-   content). */
+*/
 
 #include <string.h>
 #include <stdio.h>
@@ -59,6 +49,8 @@
 #include "ts/ts.h"
 #include "ts/ink_defs.h"
 
+#define PLUGIN_NAME "server-transform"
+
 #define STATE_BUFFER 1
 #define STATE_CONNECT 2
 #define STATE_WRITE 3
@@ -141,7 +133,7 @@ transform_destroy(TSCont contp)
 
     TSfree(data);
   } else {
-    TSError("[server_transform] Unable to get Continuation's Data. TSContDataGet returns NULL");
+    TSError("[%s] Unable to get Continuation's Data. TSContDataGet returns NULL", PLUGIN_NAME);
   }
 
   TSContDestroy(contp);
@@ -183,7 +175,7 @@ transform_connect(TSCont contp, TransformData *data)
       data->input_reader = tempReader;
     }
   } else {
-    TSError("[server_transform] TSIOBufferReaderAvail returns TS_ERROR");
+    TSError("[%s] TSIOBufferReaderAvail returns TS_ERROR", PLUGIN_NAME);
     return 0;
   }
 
@@ -193,7 +185,7 @@ transform_connect(TSCont contp, TransformData *data)
   ip_addr.sin_family      = AF_INET;
   ip_addr.sin_addr.s_addr = server_ip; /* Should be in network byte order */
   ip_addr.sin_port        = server_port;
-  TSDebug("strans", "net connect.");
+  TSDebug(PLUGIN_NAME, "net connect.");
   action = TSNetConnect(contp, (struct sockaddr const *)&ip_addr);
 
   if (!TSActionDone(action)) {
@@ -214,7 +206,7 @@ transform_write(TSCont contp, TransformData *data)
   if (content_length >= 0) {
     data->server_vio = TSVConnWrite(data->server_vc, contp, TSIOBufferReaderClone(data->input_reader), content_length);
   } else {
-    TSError("[server_transform] TSIOBufferReaderAvail returns TS_ERROR");
+    TSError("[%s] TSIOBufferReaderAvail returns TS_ERROR", PLUGIN_NAME);
   }
   return 0;
 }
@@ -229,7 +221,7 @@ transform_read_status(TSCont contp, TransformData *data)
   if (data->output_reader != NULL) {
     data->server_vio = TSVConnRead(data->server_vc, contp, data->output_buf, sizeof(int));
   } else {
-    TSError("[server_transform] Error in Allocating a Reader to output buffer. TSIOBufferReaderAlloc returns NULL");
+    TSError("[%s] Error in Allocating a Reader to output buffer. TSIOBufferReaderAlloc returns NULL", PLUGIN_NAME);
   }
 
   return 0;
@@ -247,11 +239,11 @@ transform_read(TSCont contp, TransformData *data)
   data->server_vio = TSVConnRead(data->server_vc, contp, data->output_buf, data->content_length);
   data->output_vc  = TSTransformOutputVConnGet((TSVConn)contp);
   if (data->output_vc == NULL) {
-    TSError("[server_transform] TSTransformOutputVConnGet returns NULL");
+    TSError("[%s] TSTransformOutputVConnGet returns NULL", PLUGIN_NAME);
   } else {
     data->output_vio = TSVConnWrite(data->output_vc, contp, data->output_reader, data->content_length);
     if (data->output_vio == NULL) {
-      TSError("[server_transform] TSVConnWrite returns NULL");
+      TSError("[%s] TSVConnWrite returns NULL", PLUGIN_NAME);
     }
   }
 
@@ -278,11 +270,11 @@ transform_bypass(TSCont contp, TransformData *data)
   TSIOBufferReaderConsume(data->input_reader, sizeof(int));
   data->output_vc = TSTransformOutputVConnGet((TSVConn)contp);
   if (data->output_vc == NULL) {
-    TSError("[server_transform] TSTransformOutputVConnGet returns NULL");
+    TSError("[%s] TSTransformOutputVConnGet returns NULL", PLUGIN_NAME);
   } else {
     data->output_vio = TSVConnWrite(data->output_vc, contp, data->input_reader, TSIOBufferReaderAvail(data->input_reader));
     if (data->output_vio == NULL) {
-      TSError("[server_transform] TSVConnWrite returns NULL");
+      TSError("[%s] TSVConnWrite returns NULL", PLUGIN_NAME);
     }
   }
   return 1;
@@ -364,13 +356,13 @@ transform_connect_event(TSCont contp, TransformData *data, TSEvent event, void *
 {
   switch (event) {
   case TS_EVENT_NET_CONNECT:
-    TSDebug("strans", "connected");
+    TSDebug(PLUGIN_NAME, "connected");
 
     data->pending_action = NULL;
     data->server_vc      = (TSVConn)edata;
     return transform_write(contp, data);
   case TS_EVENT_NET_CONNECT_FAILED:
-    TSDebug("strans", "connect failed");
+    TSDebug(PLUGIN_NAME, "connect failed");
     data->pending_action = NULL;
     return transform_bypass(contp, data);
   default:
@@ -510,7 +502,7 @@ transform_handler(TSCont contp, TSEvent event, void *edata)
   /* Check to see if the transformation has been closed by a call to
      TSVConnClose. */
   if (TSVConnClosedGet(contp)) {
-    TSDebug("strans", "transformation closed");
+    TSDebug(PLUGIN_NAME, "transformation closed");
     transform_destroy(contp);
     return 0;
   } else {
@@ -519,10 +511,10 @@ transform_handler(TSCont contp, TSEvent event, void *edata)
 
     data = (TransformData *)TSContDataGet(contp);
     if (data == NULL) {
-      TSError("[server_transform] Didn't get Continuation's Data, ignoring event");
+      TSError("[%s] Didn't get Continuation's Data, ignoring event", PLUGIN_NAME);
       return 0;
     }
-    TSDebug("strans", "transform handler event [%d], data->state = [%d]", event, data->state);
+    TSDebug(PLUGIN_NAME, "transform handler event [%d], data->state = [%d]", event, data->state);
 
     do {
       switch (data->state) {
@@ -587,19 +579,19 @@ server_response_ok(TSHttpTxn txnp)
   TSHttpStatus resp_status;
 
   if (TSHttpTxnServerRespGet(txnp, &bufp, &hdr_loc) != TS_SUCCESS) {
-    TSError("[server_transform] Unable to get handle to Server Response");
+    TSError("[%s] Unable to get handle to Server Response", PLUGIN_NAME);
     return 0;
   }
 
   resp_status = TSHttpHdrStatusGet(bufp, hdr_loc);
   if (TS_HTTP_STATUS_OK == resp_status) {
     if (TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc) != TS_SUCCESS) {
-      TSError("[server_transform] Unable to release handle to server request");
+      TSError("[%s] Unable to release handle to server request", PLUGIN_NAME);
     }
     return 1;
   } else {
     if (TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc) != TS_SUCCESS) {
-      TSError("[server_transform] Unable to release handle to server request");
+      TSError("[%s] Unable to release handle to server request", PLUGIN_NAME);
     }
     return 0;
   }
@@ -642,12 +634,12 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED)
   TSPluginRegistrationInfo info;
   TSCont cont;
 
-  info.plugin_name   = "server-transform";
-  info.vendor_name   = "MyCompany";
-  info.support_email = "ts-api-support@MyCompany.com";
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   if (TSPluginRegister(&info) != TS_SUCCESS) {
-    TSError("[server_transform] Plugin registration failed");
+    TSError("[%s] Plugin registration failed", PLUGIN_NAME);
   }
 
   /* connect to the echo port on localhost */
diff --git a/example/session-1/session-1.c b/example/session_hooks/session_hooks.c
similarity index 61%
rename from example/session-1/session-1.c
rename to example/session_hooks/session_hooks.c
index 0166aa2..bd62b98 100644
--- a/example/session-1/session-1.c
+++ b/example/session_hooks/session_hooks.c
@@ -1,6 +1,6 @@
 /** @file
 
-  A brief file description
+  An example plugin that demonstrates session hook usage.
 
   @section license License
 
@@ -21,40 +21,33 @@
   limitations under the License.
  */
 
-/* session-1.c: a plugin that illustrates how to use
- *                session hooks
- *
- *
- *  Usage: session-1.so
- *
- */
-
 #include <stdio.h>
 #include "ts/ts.h"
 #include "ts/ink_defs.h"
 
-static INKStat transaction_count;
-static INKStat session_count;
-static INKStat av_transaction;
+#define PLUGIN_NAME "session_hooks"
+
+static int transaction_count_stat;
+static int session_count_stat;
 
 static void
 txn_handler(TSHttpTxn txnp, TSCont contp)
 {
-  int64_t num_txns = 0;
+  TSMgmtInt num_txns = 0;
 
-  INKStatIncrement(transaction_count);
-  num_txns = INKStatIntGet(transaction_count);
-  TSDebug("tag_session", "The number of transactions is %" PRId64, num_txns);
+  TSStatIntIncrement(transaction_count_stat, 1);
+  num_txns = TSStatIntGet(transaction_count_stat);
+  TSDebug(PLUGIN_NAME, "The number of transactions is %" PRId64, num_txns);
 }
 
 static void
 handle_session(TSHttpSsn ssnp, TSCont contp)
 {
-  int64_t num_ssn = 0;
+  TSMgmtInt num_ssn = 0;
 
-  INKStatIncrement(session_count);
-  num_ssn = INKStatIntGet(session_count);
-  TSDebug("tag_session", "The number of sessions is %" PRId64, num_ssn);
+  TSStatIntIncrement(session_count_stat, 1);
+  num_ssn = TSStatIntGet(session_count_stat);
+  TSDebug(PLUGIN_NAME, "The number of sessions is %" PRId64, num_ssn);
   TSHttpSsnHookAdd(ssnp, TS_HTTP_TXN_START_HOOK, contp);
 }
 
@@ -79,7 +72,7 @@ ssn_handler(TSCont contp, TSEvent event, void *edata)
     return 0;
 
   default:
-    TSDebug("tag_session", "In the default case: event = %d", event);
+    TSDebug(PLUGIN_NAME, "In the default case: event = %d", event);
     break;
   }
   return 0;
@@ -91,23 +84,22 @@ TSPluginInit(int argc, const char *argv[])
   TSCont contp;
   TSPluginRegistrationInfo info;
 
-  info.plugin_name   = "session-1";
-  info.vendor_name   = "MyCompany";
-  info.support_email = "ts-api-support@MyCompany.com";
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   if (TSPluginRegister(&info) != TS_SUCCESS) {
-    TSError("[session-1] Plugin registration failed.\n");
+    TSError("[%s] Plugin registration failed.\n", PLUGIN_NAME);
 
     goto error;
   }
 
-  transaction_count = INKStatCreate("transaction.count", INKSTAT_TYPE_INT64);
-  session_count     = INKStatCreate("session.count", INKSTAT_TYPE_INT64);
-  av_transaction    = INKStatCreate("avg.transactions", INKSTAT_TYPE_FLOAT);
+  transaction_count_stat = TSStatCreate("transaction.count", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+  session_count_stat     = TSStatCreate("session.count", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
 
   contp = TSContCreate(ssn_handler, NULL);
   TSHttpHookAdd(TS_HTTP_SSN_START_HOOK, contp);
 
 error:
-  TSError("[session-1] Plugin not initialized");
+  TSError("[%s] Plugin not initialized", PLUGIN_NAME);
 }
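The session_hooks rewrite above replaces the old INKStat calls with the current TSStat API. A minimal sketch of that pattern, assuming only the calls shown in the diff (the stat name and debug tag below are illustrative):

    #include <inttypes.h>
    #include "ts/ts.h"

    static int example_count_stat;

    void
    TSPluginInit(int argc, const char *argv[])
    {
      (void)argc;
      (void)argv;

      /* Create, or re-attach to, a non-persistent integer counter. */
      example_count_stat = TSStatCreate("example.request.count", TS_RECORDDATATYPE_INT,
                                        TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);

      /* From a hook handler: bump the counter and read it back. */
      TSStatIntIncrement(example_count_stat, 1);
      TSDebug("example", "count is %" PRId64, TSStatIntGet(example_count_stat));
    }
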
diff --git a/example/ssl-preaccept/ats-util.h b/example/ssl-preaccept/ats-util.h
deleted file mode 100644
index 8973b14..0000000
--- a/example/ssl-preaccept/ats-util.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/** @file
-
-  Copies of some ATS core utilities that aren't exposed to plugins.
-
-  @section license License
-
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
- */
-
-#if !defined(_ats_util_h)
-#define _ats_util_h
-
-#if defined(__cplusplus)
-/** Set data to zero.
-
-    Calls @c memset on @a t with a value of zero and a length of @c
-    sizeof(t). This can be used on ordinary and array variables. While
-    this can be used on variables of intrinsic type it's inefficient.
-
-    @note Because this uses templates it cannot be used on unnamed or
-    locally scoped structures / classes. This is an inherent
-    limitation of templates.
-
-    Examples:
-    @code
-    foo bar; // value.
-    ink_zero(bar); // zero bar.
-
-    foo *bar; // pointer.
-    ink_zero(bar); // WRONG - makes the pointer @a bar zero.
-    ink_zero(*bar); // zero what bar points at.
-
-    foo bar[ZOMG]; // Array of structs.
-    ink_zero(bar); // Zero all structs in array.
-
-    foo *bar[ZOMG]; // array of pointers.
-    ink_zero(bar); // zero all pointers in the array.
-    @endcode
-
- */
-template <typename T>
-inline void
-ink_zero(T &t ///< Object to zero.
-         )
-{
-  memset(&t, 0, sizeof(t));
-}
-#endif /* __cplusplus */
-
-#endif // ats-util.h
diff --git a/example/ssl-preaccept/ssl-preaccept.cc b/example/ssl_preaccept/ssl_preaccept.cc
similarity index 86%
rename from example/ssl-preaccept/ssl-preaccept.cc
rename to example/ssl_preaccept/ssl_preaccept.cc
index 42b8115..925a28d 100644
--- a/example/ssl-preaccept/ssl-preaccept.cc
+++ b/example/ssl_preaccept/ssl_preaccept.cc
@@ -1,6 +1,7 @@
 /** @file
 
-  SSL Preaccept test plugin
+  SSL Preaccept test plugin.
+
   Implements blind tunneling based on the client IP address
   The client ip addresses are specified in the plugin's
   config file as an array of IP addresses or IP address ranges under the
@@ -31,13 +32,14 @@
 #include <ts/ts.h>
 #include <tsconfig/TsValue.h>
 #include <ts/ink_inet.h>
+#include <algorithm>
 #include <getopt.h>
 
 using ts::config::Configuration;
 using ts::config::Value;
 
-#define PN "ssl-preaccept"
-#define PCP "[" PN " Plugin] "
+#define PLUGIN_NAME "ssl-preaccept"
+#define PCP "[" PLUGIN_NAME "] "
 
 namespace
 {
@@ -123,28 +125,27 @@ CB_Pre_Accept(TSCont, TSEvent event, void *edata)
   IpAddr ip_client(TSNetVConnRemoteAddrGet(ssl_vc));
   char buff2[INET6_ADDRSTRLEN];
 
-  TSDebug("skh", "Pre accept callback %p - event is %s, target address %s, client address %s", ssl_vc,
-          event == TS_EVENT_VCONN_PRE_ACCEPT ? "good" : "bad", ip.toString(buff, sizeof(buff)),
-          ip_client.toString(buff2, sizeof(buff2)));
-
   // Not the world's most efficient address comparison.  For short lists
   // shouldn't be too bad.  If the client IP is in any of the ranges,
   // flip the tunnel to be blind tunneled instead of decrypted and proxied
   bool proxy_tunnel = true;
-  IpRangeQueue::iterator iter;
-  for (iter = ClientBlindTunnelIp.begin(); iter != ClientBlindTunnelIp.end() && proxy_tunnel; iter++) {
-    if (ip_client >= iter->first && ip_client <= iter->second) {
+
+  for (auto const &r : ClientBlindTunnelIp) {
+    if (r.first <= ip_client && ip_client <= r.second) {
       proxy_tunnel = false;
+      break;
     }
   }
+
   if (!proxy_tunnel) {
-    TSDebug("skh", "Blind tunnel");
     // Push everything to blind tunnel
     TSVConnTunnel(ssl_vc);
-  } else {
-    TSDebug("skh", "Proxy tunnel");
   }
 
+  TSDebug(PLUGIN_NAME, "Pre accept callback %p - event is %s, target address %s, client address %s%s", ssl_vc,
+          event == TS_EVENT_VCONN_PRE_ACCEPT ? "good" : "bad", ip.toString(buff, sizeof(buff)),
+          ip_client.toString(buff2, sizeof(buff2)), proxy_tunnel ? "" : " blind tunneled");
+
   // All done, reactivate things
   TSVConnReenable(ssl_vc);
   return TS_SUCCESS;
@@ -163,9 +164,9 @@ TSPluginInit(int argc, const char *argv[])
     {const_cast<char *>("config"), required_argument, nullptr, 'c'}, {nullptr, no_argument, nullptr, '\0'},
   };
 
-  info.plugin_name   = const_cast<char *>("SSL Preaccept test");
-  info.vendor_name   = const_cast<char *>("Network Geographics");
-  info.support_email = const_cast<char *>("shinrich@network-geographics.com");
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   int opt = 0;
   while (opt >= 0) {
@@ -180,7 +181,7 @@ TSPluginInit(int argc, const char *argv[])
   if (ConfigPath.length() == 0) {
     static const char *const DEFAULT_CONFIG_PATH = "ssl_preaccept.config";
     ConfigPath                                   = std::string(TSConfigDirGet()) + '/' + std::string(DEFAULT_CONFIG_PATH);
-    TSDebug(PN, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
+    TSDebug(PLUGIN_NAME, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
   }
 
   if (TS_SUCCESS != TSPluginRegister(&info)) {
@@ -199,7 +200,7 @@ TSPluginInit(int argc, const char *argv[])
   if (!success) {
     TSError(PCP "not initialized");
   }
-  TSDebug(PN, "Plugin %s", success ? "online" : "offline");
+  TSDebug(PLUGIN_NAME, "Plugin %s", success ? "online" : "offline");
 
   return;
 }
diff --git a/example/ssl-preaccept/ssl_preaccept.config b/example/ssl_preaccept/ssl_preaccept.config
similarity index 100%
rename from example/ssl-preaccept/ssl_preaccept.config
rename to example/ssl_preaccept/ssl_preaccept.config
diff --git a/example/ssl-sni/ssl-sni.cc b/example/ssl_sni/ssl_sni.cc
similarity index 84%
rename from example/ssl-sni/ssl-sni.cc
rename to example/ssl_sni/ssl_sni.cc
index 110f861..2d9e1aa 100644
--- a/example/ssl-sni/ssl-sni.cc
+++ b/example/ssl_sni/ssl_sni.cc
@@ -1,6 +1,7 @@
-/**
-  @file
-  SSL Preaccept test plugin
+/** @file
+
+  SSL SNI test plugin.
+
   Implements blind tunneling based on the client IP address
   The client ip addresses are specified in the plugin's
   config file as an array of IP addresses or IP address ranges under the
@@ -29,7 +30,6 @@
 #include <memory.h>
 #include <cinttypes>
 #include <ts/ts.h>
-#include "ts/ink_config.h"
 #include <tsconfig/TsValue.h>
 #include <openssl/ssl.h>
 #include <getopt.h>
@@ -37,10 +37,8 @@
 using ts::config::Configuration;
 using ts::config::Value;
 
-#define PN "ssl-sni-test"
-#define PCP "[" PN " Plugin] "
-
-#if TS_USE_TLS_SNI
+#define PLUGIN_NAME "ssl_sni"
+#define PCP "[" PLUGIN_NAME "] "
 
 namespace
 {
@@ -93,7 +91,7 @@ CB_servername(TSCont /* contp */, TSEvent /* event */, void *edata)
     if (servername_len >= facebook_name_len) {
       const char *server_ptr = servername + (servername_len - facebook_name_len);
       if (strcmp(server_ptr, "facebook.com") == 0) {
-        TSDebug("skh", "Blind tunnel from SNI callback");
+        TSDebug(PLUGIN_NAME, "Blind tunnel from SNI callback");
         TSVConnTunnel(ssl_vc);
         // Don't reenable to ensure that we break out of the
         // SSL handshake processing
@@ -102,14 +100,14 @@ CB_servername(TSCont /* contp */, TSEvent /* event */, void *edata)
     }
     // If the name is yahoo, look for a context for safelyfiled and use that here
     if (strcmp("www.yahoo.com", servername) == 0) {
-      TSDebug("skh", "SNI name is yahoo ssl obj is %p", sslobj);
+      TSDebug(PLUGIN_NAME, "SNI name is yahoo ssl obj is %p", sslobj);
       if (sslobj) {
         TSSslContext ctxobj = TSSslContextFindByName("safelyfiled.com");
         if (ctxobj != nullptr) {
-          TSDebug("skh", "Found cert for safelyfiled");
+          TSDebug(PLUGIN_NAME, "Found cert for safelyfiled");
           SSL_CTX *ctx = reinterpret_cast<SSL_CTX *>(ctxobj);
           SSL_set_SSL_CTX(ssl, ctx);
-          TSDebug("skh", "SNI plugin cb: replace SSL CTX");
+          TSDebug(PLUGIN_NAME, "SNI plugin cb: replace SSL CTX");
         }
       }
     }
@@ -133,9 +131,9 @@ TSPluginInit(int argc, const char *argv[])
     {const_cast<char *>("config"), required_argument, nullptr, 'c'}, {nullptr, no_argument, nullptr, '\0'},
   };
 
-  info.plugin_name   = const_cast<char *>("SSL SNI callback test");
-  info.vendor_name   = const_cast<char *>("Network Geographics");
-  info.support_email = const_cast<char *>("shinrich@network-geographics.com");
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   int opt = 0;
   while (opt >= 0) {
@@ -150,7 +148,7 @@ TSPluginInit(int argc, const char *argv[])
   if (ConfigPath.length() == 0) {
     static const char *const DEFAULT_CONFIG_PATH = "ssl_sni.config";
     ConfigPath                                   = std::string(TSConfigDirGet()) + '/' + std::string(DEFAULT_CONFIG_PATH);
-    TSDebug(PN, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
+    TSDebug(PLUGIN_NAME, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
   }
 
   if (TS_SUCCESS != TSPluginRegister(&info)) {
@@ -169,17 +167,7 @@ TSPluginInit(int argc, const char *argv[])
   if (!success) {
     TSError(PCP "not initialized");
   }
-  TSDebug(PN, "Plugin %s", success ? "online" : "offline");
+  TSDebug(PLUGIN_NAME, "Plugin %s", success ? "online" : "offline");
 
   return;
 }
-
-#else // ! TS_USE_TLS_SNI
-
-void
-TSPluginInit(int, const char *[])
-{
-  TSError(PCP "requires TLS SNI which is not available");
-}
-
-#endif // TS_USE_TLS_SNI
diff --git a/example/ssl-sni/ssl_sni.config b/example/ssl_sni/ssl_sni.config
similarity index 100%
rename from example/ssl-sni/ssl_sni.config
rename to example/ssl_sni/ssl_sni.config
diff --git a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc b/example/ssl_sni_whitelist/ssl_sni_whitelist.cc
similarity index 87%
rename from example/ssl-sni-whitelist/ssl-sni-whitelist.cc
rename to example/ssl_sni_whitelist/ssl_sni_whitelist.cc
index c4dd585..49c8f3f 100644
--- a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc
+++ b/example/ssl_sni_whitelist/ssl_sni_whitelist.cc
@@ -35,10 +35,8 @@
 using ts::config::Configuration;
 using ts::config::Value;
 
-#define PN "ssl-sni-whitelist"
-#define PCP "[" PN " Plugin] "
-
-#if TS_USE_TLS_SNI
+#define PLUGIN_NAME "ssl_sni_whitelist"
+#define PCP "[" PLUGIN_NAME "] "
 
 namespace
 {
@@ -112,9 +110,9 @@ TSPluginInit(int argc, const char *argv[])
     {const_cast<char *>("config"), required_argument, nullptr, 'c'}, {nullptr, no_argument, nullptr, '\0'},
   };
 
-  info.plugin_name   = const_cast<char *>("SSL SNI whitelist");
-  info.vendor_name   = const_cast<char *>("Network Geographics");
-  info.support_email = const_cast<char *>("shinrich@network-geographics.com");
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   int opt = 0;
   while (opt >= 0) {
@@ -129,7 +127,7 @@ TSPluginInit(int argc, const char *argv[])
   if (ConfigPath.length() == 0) {
     static const char *const DEFAULT_CONFIG_PATH = "ssl_sni_whitelist.config";
     ConfigPath                                   = std::string(TSConfigDirGet()) + '/' + std::string(DEFAULT_CONFIG_PATH);
-    TSDebug(PN, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
+    TSDebug(PLUGIN_NAME, "No config path set in arguments, using default: %s", DEFAULT_CONFIG_PATH);
   }
 
   if (TS_SUCCESS != TSPluginRegister(&info)) {
@@ -148,17 +146,7 @@ TSPluginInit(int argc, const char *argv[])
   if (!success) {
     TSError(PCP "not initialized");
   }
-  TSDebug(PN, "Plugin %s", success ? "online" : "offline");
+  TSDebug(PLUGIN_NAME, "Plugin %s", success ? "online" : "offline");
 
   return;
 }
-
-#else // ! TS_USE_TLS_SNI
-
-void
-TSPluginInit(int, const char *[])
-{
-  TSError(PCP "requires TLS SNI which is not available");
-}
-
-#endif // TS_USE_TLS_SNI
diff --git a/example/ssl-sni-whitelist/ssl_sni_whitelist.config b/example/ssl_sni_whitelist/ssl_sni_whitelist.config
similarity index 100%
rename from example/ssl-sni-whitelist/ssl_sni_whitelist.config
rename to example/ssl_sni_whitelist/ssl_sni_whitelist.config
diff --git a/example/statistic/statistic.cc b/example/statistic/statistic.cc
index d691042..09ee047 100644
--- a/example/statistic/statistic.cc
+++ b/example/statistic/statistic.cc
@@ -1,5 +1,15 @@
 /** @file
 
+  Statistics example plugin.
+
+  This plugin demonstrates the statistics API and also serves as a
+  regression test for TS-4840. If traffic_server is restarted, a
+  plugin ought to be able to safely reattach to its statistics.
+
+  This source is included as an example in the developer documentation, so if
+  you change it, you may have to update the line numbers in
+  doc/developer-guide/plugins/adding-statistics.en.rst
+
   @section license License
 
   Licensed to the Apache Software Foundation (ASF) under one
@@ -19,14 +29,6 @@
   limitations under the License.
  */
 
-// This plugin demonstrates the statistics API and also serves as a
-// regression test for TS-4840. If traffic_server is restarted, a
-// plugin ought to be able to safely reattach to its statistics.
-//
-// This source is included as an example in the developers so if you
-// change it, you may have to update the line numbers on
-// doc/developer-guide/plugins/adding-statistics.en.rst
-
 #include <ts/ts.h>
 #include <cinttypes>
 #include <ctime>
diff --git a/example/thread-1/readme.txt b/example/thread_1/readme.txt
similarity index 96%
rename from example/thread-1/readme.txt
rename to example/thread_1/readme.txt
index 0372e92..4752797 100644
--- a/example/thread-1/readme.txt
+++ b/example/thread_1/readme.txt
@@ -1,5 +1,3 @@
-About thread-plugin.c
-
 This plugin sets up a callback to a continuation whose
 handler function creates a thread.
 
diff --git a/example/thread-1/thread-1.c b/example/thread_1/thread_1.c
similarity index 78%
rename from example/thread-1/thread-1.c
rename to example/thread_1/thread_1.c
index 28ea799..b7979d4 100644
--- a/example/thread-1/thread-1.c
+++ b/example/thread_1/thread_1.c
@@ -1,6 +1,8 @@
 /** @file
 
-  A brief file description
+  An example plugin that creates a thread.
+
+  The thread is created on the DNS lookup hook and simply re-enables the transaction from the thread.
 
   @section license License
 
@@ -21,22 +23,14 @@
   limitations under the License.
  */
 
-/* thread-1.c:  an example program that creates a thread
- *
- *
- *
- *	Usage:
- *	  thread-1.so
- *
- *
- */
-
 #include <stdio.h>
 #include <string.h>
 
 #include "ts/ts.h"
 #include "ts/ink_defs.h"
 
+#define PLUGIN_NAME "thread_1"
+
 static void *
 reenable_txn(void *data)
 {
@@ -55,7 +49,7 @@ thread_plugin(TSCont contp ATS_UNUSED, TSEvent event, void *edata)
      * If the thread has not been created successfully, assert.
      */
     if (!TSThreadCreate(reenable_txn, edata)) {
-      TSReleaseAssert(!"Failure in thread creation");
+      TSReleaseAssert(!PLUGIN_NAME " - Failure in thread creation");
     }
     return 0;
   default:
@@ -69,12 +63,12 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED)
 {
   TSPluginRegistrationInfo info;
 
-  info.plugin_name   = "thread-1";
-  info.vendor_name   = "MyCompany";
-  info.support_email = "ts-api-support@MyCompany.com";
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
   if (TSPluginRegister(&info) != TS_SUCCESS) {
-    TSError("[thread-1] Plugin registration failed");
+    TSError("[%s] Plugin registration failed", PLUGIN_NAME);
   }
 
   TSHttpHookAdd(TS_HTTP_OS_DNS_HOOK, TSContCreate(thread_plugin, NULL));
diff --git a/example/thread-pool/README.txt b/example/thread_pool/README.txt
similarity index 97%
rename from example/thread-pool/README.txt
rename to example/thread_pool/README.txt
index 78a4573..da64ae4 100644
--- a/example/thread-pool/README.txt
+++ b/example/thread_pool/README.txt
@@ -108,7 +108,7 @@ Plugin Testing
  Sample include file generation
  ------------------------------
  A basic utility ('gen') to generate files to be inserted is provided
- in the directory thread-pool/include.
+ in the directory thread_pool/include.
  This creates text files of various sizes.
  Compile gen, then execute gen_inc.sh to generate files:
  > cd include
@@ -128,6 +128,5 @@ Plugin Testing
  The rate of responses with the X-Psi header is configurable through an SDKTest config file.
 
  An SDKTest server plugin as well as an SDKTest configuration file
- are provided in the directory thread-pool/test/SDKTest.
+ are provided in the directory thread_pool/test/SDKTest.
  Refer to the SDKTest manual for detailed setup instructions.
-
diff --git a/example/thread-pool/TESTPLAN.txt b/example/thread_pool/TESTPLAN.txt
similarity index 90%
rename from example/thread-pool/TESTPLAN.txt
rename to example/thread_pool/TESTPLAN.txt
index 3e5cbc4..a884104 100644
--- a/example/thread-pool/TESTPLAN.txt
+++ b/example/thread_pool/TESTPLAN.txt
@@ -1,4 +1,4 @@
-	Thread-Pool Sample Plugin Testing
+	Thread_Pool Sample Plugin Testing
 	=================================
 
 
@@ -10,7 +10,7 @@ boundary testing.
 
 List of tests:
 --------------
-Test case are under thread-pool/test/SynTest/Tests/Psi
+Test cases are under thread_pool/test/SynTest/Tests/Psi
 1.cfg:Description: PSI Plugin - Include tag at the beginning of doc
 2.cfg:Description: PSI Plugin - include tag middle of body doc
 3.cfg:Description: PSI Plugin - include tag end of body doc
@@ -27,14 +27,14 @@ Test case are under thread-pool/test/SynTest/Tests/Psi
 
 SynTest config file:
 --------------------
-SynTest config files are under thread-pool/test/SynTest
+SynTest config files are under thread_pool/test/SynTest
  - system.cfg: master SynTest config file
  - tests_psi.cfg: test plan for SynTest
 
 Tests setup:
 ------------
  - Update plugin.config to add the psi plugin
- - Generate the include files using the gen_inc.sh tool in thread-pool/include
+ - Generate the include files using the gen_inc.sh tool in thread_pool/include
  - Copy the include files into $TS_HOME/etc/trafficserver/plugins/include
  - Start TS
  - Start SynTest
@@ -52,7 +52,7 @@ SDKTest is used for load testing.
 A SDKTest server plugin is required to exercise the plugin (the server
 must send back responses with 'X-Psi' header).
 
-The SDKTest server plugin is located under thread-pool/test/SDKTest
+The SDKTest server plugin is located under thread_pool/test/SDKTest
 An SDKTest configuration file is also provided (it allows changing the ratio
 of X-Psi responses).
 
@@ -60,7 +60,7 @@ Tests setup:
 ------------
 TS setup:
  - Update plugin.config to add the psi plugin
- - Generate the include files using the gen_inc.sh tool in thread-pool/include
+ - Generate the include files using the gen_inc.sh tool in thread_pool/include
  - Copy the include files into $TS_HOME/etc/trafficserver/plugins/include
  - Start TS
 
diff --git a/example/thread-pool/include/Makefile.am b/example/thread_pool/include/Makefile.am
similarity index 100%
copy from example/thread-pool/include/Makefile.am
copy to example/thread_pool/include/Makefile.am
diff --git a/example/thread-pool/include/gen.c b/example/thread_pool/include/gen.c
similarity index 100%
rename from example/thread-pool/include/gen.c
rename to example/thread_pool/include/gen.c
diff --git a/example/thread-pool/include/gen_inc.sh b/example/thread_pool/include/gen_inc.sh
similarity index 100%
rename from example/thread-pool/include/gen_inc.sh
rename to example/thread_pool/include/gen_inc.sh
diff --git a/example/thread-pool/psi.c b/example/thread_pool/psi.c
similarity index 94%
rename from example/thread-pool/psi.c
rename to example/thread_pool/psi.c
index 02bc801..a3b73a8 100644
--- a/example/thread-pool/psi.c
+++ b/example/thread_pool/psi.c
@@ -2,6 +2,16 @@
 
   Proxy Side Include plugin (PSI)
 
+  Synopsis:
+
+  This plugin allows inserting the content of a file stored on the proxy disk
+  into the body of an HTML response.
+
+  The plugin illustrates how to use a pool of threads in order to do blocking
+  calls (here, some disk I/O) in a Traffic Server plugin.
+
+  Further details: refer to the README file.
+
   @section license License
 
   Licensed to the Apache Software Foundation (ASF) under one
@@ -21,41 +31,16 @@
   limitations under the License.
  */
 
-/*
- *
- *
- *
- *	Usage:
- *	  psi.so
- *
- *  Proxy Side Include plugin (PSI)
- *
- *   Synopsis:
- *
- *  This plugin allows to insert the content of a file stored on the proxy disk
- *  into the body of an html response.
- *
- *  The plugin illustrates how to use a pool of threads in order to do blocking
- *  calls (here, some disk i/o) in a Traffic Server plugin.
- *
- *
- *   Details:
- *
- *  Refer to README file.
- *
- */
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <limits.h>
 #include <string.h>
+#include <sys/param.h>
 
 #include "ts/ts.h"
 #include "thread.h"
 #include "ts/ink_defs.h"
 
-#define DBG_TAG "xpsi"
-
 /* This is the number of threads spawned by the plugin.
    Should be tuned based on performance requirements,
    blocking calls duration, etc... */
@@ -167,7 +152,7 @@ cont_data_alloc()
 static void
 cont_data_destroy(ContData *data)
 {
-  TSDebug(DBG_TAG, "Destroying continuation data");
+  TSDebug(PLUGIN_NAME, "Destroying continuation data");
   if (data) {
     TSAssert(data->magic == MAGIC_ALIVE);
     if (data->output_reader) {
@@ -242,13 +227,13 @@ strsearch_ioreader(TSIOBufferReader reader, const char *pattern, int *nparse)
 
   *nparse -= index; /* Adjust nparse so it doesn't include matching chars */
   if (index == slen) {
-    TSDebug(DBG_TAG, "strfind: match for %s at position %d", pattern, *nparse);
+    TSDebug(PLUGIN_NAME, "strfind: match for %s at position %d", pattern, *nparse);
     return STR_SUCCESS;
   } else if (index > 0) {
-    TSDebug(DBG_TAG, "strfind: partial match for %s at position %d", pattern, *nparse);
+    TSDebug(PLUGIN_NAME, "strfind: partial match for %s at position %d", pattern, *nparse);
     return STR_PARTIAL;
   } else {
-    TSDebug(DBG_TAG, "strfind no match for %s", pattern);
+    TSDebug(PLUGIN_NAME, "strfind no match for %s", pattern);
     return STR_FAIL;
   }
 }
@@ -317,7 +302,7 @@ strextract_ioreader(TSIOBufferReader reader, int offset, const char *end_pattern
 
   /* Error, could not read end of filename */
   if (buf_idx >= PSI_FILENAME_MAX_SIZE) {
-    TSDebug(DBG_TAG, "strextract: filename too long");
+    TSDebug(PLUGIN_NAME, "strextract: filename too long");
     *buflen = 0;
     return STR_FAIL;
   }
@@ -327,12 +312,12 @@ strextract_ioreader(TSIOBufferReader reader, int offset, const char *end_pattern
     /* Nul terminate the filename, remove the end_pattern copied into the buffer */
     *buflen         = buf_idx - plen;
     buffer[*buflen] = '\0';
-    TSDebug(DBG_TAG, "strextract: filename = |%s|", buffer);
+    TSDebug(PLUGIN_NAME, "strextract: filename = |%s|", buffer);
     return STR_SUCCESS;
   }
   /* End of filename not yet reached we need to read some more data */
   else {
-    TSDebug(DBG_TAG, "strextract: partially extracted filename");
+    TSDebug(PLUGIN_NAME, "strextract: partially extracted filename");
     *buflen = buf_idx - p_idx;
     return STR_PARTIAL;
   }
@@ -485,7 +470,7 @@ psi_include(TSCont contp, void *edata ATS_UNUSED)
 
   /* Read the include file and copy content into iobuffer */
   if ((filep = TSfopen(inc_file, "r")) != NULL) {
-    TSDebug(DBG_TAG, "Reading include file %s", inc_file);
+    TSDebug(PLUGIN_NAME, "Reading include file %s", inc_file);
 
     while (TSfgets(filep, buf, BUFFER_SIZE) != NULL) {
       TSIOBufferBlock block;
@@ -564,7 +549,7 @@ wake_up_streams(TSCont contp)
     TSVIOReenable(data->output_vio);
     TSContCall(TSVIOContGet(input_vio), TS_EVENT_VCONN_WRITE_READY, input_vio);
   } else {
-    TSDebug(DBG_TAG, "Total bytes produced by transform = %d", data->transform_bytes);
+    TSDebug(PLUGIN_NAME, "Total bytes produced by transform = %d", data->transform_bytes);
     TSVIONBytesSet(data->output_vio, data->transform_bytes);
     TSVIOReenable(data->output_vio);
     TSContCall(TSVIOContGet(input_vio), TS_EVENT_VCONN_WRITE_COMPLETE, input_vio);
@@ -615,7 +600,7 @@ handle_transform(TSCont contp)
 
   /* If the input VIO's buffer is NULL, the transformation is over */
   if (!TSVIOBufferGet(input_vio)) {
-    TSDebug(DBG_TAG, "input_vio NULL, terminating transformation");
+    TSDebug(PLUGIN_NAME, "input_vio NULL, terminating transformation");
     TSVIONBytesSet(data->output_vio, data->transform_bytes);
     TSVIOReenable(data->output_vio);
     return 1;
@@ -664,7 +649,7 @@ handle_transform(TSCont contp)
         Job *new_job;
         /* Add a request to include a file into the jobs queue.. */
         /* We'll be called back once it's done with an EVENT_IMMEDIATE */
-        TSDebug(DBG_TAG, "Psi filename extracted, adding an include job to thread queue");
+        TSDebug(PLUGIN_NAME, "Psi filename extracted, adding an include job to thread queue");
         data->state = STATE_READ_PSI;
 
         /* Create a new job request and add it to the queue */
@@ -718,7 +703,7 @@ dump_psi(TSCont contp)
     if (psi_output_len > 0) {
       data->transform_bytes += psi_output_len;
 
-      TSDebug(DBG_TAG, "Inserting %d bytes from include file", psi_output_len);
+      TSDebug(PLUGIN_NAME, "Inserting %d bytes from include file", psi_output_len);
       /* TODO: Should we check the return value of TSIOBufferCopy() ? */
       TSIOBufferCopy(TSVIOBufferGet(data->output_vio), data->psi_reader, psi_output_len, 0);
       /* Consume all the output data */
@@ -881,7 +866,7 @@ transformable(TSHttpTxn txnp)
 
     field_loc = TSMimeHdrFieldFind(bufp, hdr_loc, TS_MIME_FIELD_CONTENT_TYPE, -1);
     if (field_loc == TS_NULL_MLOC) {
-      TSError("[psi] Unable to search Content-Type field");
+      TSError("[%s] Unable to search Content-Type field", PLUGIN_NAME);
       TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
       return 0;
     }
@@ -948,7 +933,7 @@ read_response_handler(TSCont contp ATS_UNUSED, TSEvent event, void *edata)
   switch (event) {
   case TS_EVENT_HTTP_READ_RESPONSE_HDR:
     if (transformable(txnp)) {
-      TSDebug(DBG_TAG, "Add a transformation");
+      TSDebug(PLUGIN_NAME, "Add a transformation");
       transform_add(txnp);
     }
     TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
@@ -982,7 +967,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED)
   info.support_email = "";
 
   if (TSPluginRegister(&info) != TS_SUCCESS) {
-    TSError("[psi] Plugin registration failed");
+    TSError("[%s] Plugin registration failed", PLUGIN_NAME);
   }
 
   /* Initialize the psi directory = <plugin_path>/include */
@@ -991,7 +976,7 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED)
   /* create an TSTextLogObject to log any psi include */
   retval = TSTextLogObjectCreate("psi", TS_LOG_MODE_ADD_TIMESTAMP, &log);
   if (retval == TS_ERROR) {
-    TSError("[psi] Failed creating log for psi plugin");
+    TSError("[%s] Failed creating log for psi plugin", PLUGIN_NAME);
     log = NULL;
   }
 
@@ -1003,11 +988,11 @@ TSPluginInit(int argc ATS_UNUSED, const char *argv[] ATS_UNUSED)
     char *thread_name = (char *)TSmalloc(64);
     sprintf(thread_name, "Thread[%d]", i);
     if (!TSThreadCreate((TSThreadFunc)thread_loop, thread_name)) {
-      TSError("[psi] Failed creating threads");
+      TSError("[%s] Failed creating threads", PLUGIN_NAME);
       return;
     }
   }
 
   TSHttpHookAdd(TS_HTTP_READ_RESPONSE_HDR_HOOK, TSContCreate(read_response_handler, TSMutexCreate()));
-  TSDebug(DBG_TAG, "Plugin started");
+  TSDebug(PLUGIN_NAME, "Plugin started");
 }
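The psi.c synopsis above describes handing blocking disk I/O to a pool of plugin-created threads so the event threads are never stalled. A minimal sketch of that pattern using only public API calls (the Job struct, queue, and sleep interval are illustrative, not the plugin's actual types):

    #include <unistd.h>
    #include "ts/ts.h"

    typedef struct Job {
      struct Job *next;
      void (*work)(void *); /* the blocking call to run off the event threads */
      void *data;
    } Job;

    static TSMutex job_mutex;
    static Job *job_queue;

    static void *
    worker_loop(void *arg)
    {
      (void)arg;
      for (;;) {
        Job *job = NULL;

        TSMutexLock(job_mutex);
        if (job_queue) {
          job       = job_queue;
          job_queue = job->next;
        }
        TSMutexUnlock(job_mutex);

        if (job) {
          job->work(job->data); /* blocking I/O happens here, not on a net thread */
          TSfree(job);
        } else {
          usleep(10000); /* a real pool would sleep or poll, much as psi.c does */
        }
      }
      return NULL;
    }

    /* Call from TSPluginInit() before adding hooks. */
    static void
    spawn_workers(int n)
    {
      job_mutex = TSMutexCreate();
      while (n-- > 0) {
        TSThreadCreate(worker_loop, NULL);
      }
    }
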
diff --git a/example/thread-pool/test/SDKTest/SDKtest_server.config b/example/thread_pool/test/SDKTest/SDKtest_server.config
similarity index 100%
rename from example/thread-pool/test/SDKTest/SDKtest_server.config
rename to example/thread_pool/test/SDKTest/SDKtest_server.config
diff --git a/example/thread-pool/test/SDKTest/psi_server.c b/example/thread_pool/test/SDKTest/psi_server.c
similarity index 100%
rename from example/thread-pool/test/SDKTest/psi_server.c
rename to example/thread_pool/test/SDKTest/psi_server.c
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/1.cfg b/example/thread_pool/test/SynTest/Tests/Psi/1.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/1.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/1.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/10.cfg b/example/thread_pool/test/SynTest/Tests/Psi/10.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/10.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/10.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/11.cfg b/example/thread_pool/test/SynTest/Tests/Psi/11.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/11.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/11.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/12.cfg b/example/thread_pool/test/SynTest/Tests/Psi/12.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/12.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/12.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/13.cfg b/example/thread_pool/test/SynTest/Tests/Psi/13.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/13.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/13.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/2.cfg b/example/thread_pool/test/SynTest/Tests/Psi/2.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/2.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/2.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/3.cfg b/example/thread_pool/test/SynTest/Tests/Psi/3.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/3.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/3.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/4.cfg b/example/thread_pool/test/SynTest/Tests/Psi/4.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/4.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/4.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/5.cfg b/example/thread_pool/test/SynTest/Tests/Psi/5.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/5.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/5.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/6.cfg b/example/thread_pool/test/SynTest/Tests/Psi/6.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/6.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/6.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/7.cfg b/example/thread_pool/test/SynTest/Tests/Psi/7.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/7.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/7.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/8.cfg b/example/thread_pool/test/SynTest/Tests/Psi/8.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/8.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/8.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/9.cfg b/example/thread_pool/test/SynTest/Tests/Psi/9.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/9.cfg
rename to example/thread_pool/test/SynTest/Tests/Psi/9.cfg
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc10_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc10_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc10_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc10_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc11_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc11_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc11_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc11_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc12_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc12_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc12_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc12_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc13_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc13_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc13_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc13_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc1_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc1_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc1_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc1_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc2_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc2_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc2_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc2_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc3_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc3_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc3_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc3_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc4_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc4_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc4_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc4_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc5_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc5_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc5_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc5_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc6_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc6_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc6_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc6_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc7_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc7_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc7_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc7_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc8_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc8_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc8_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc8_file.txt
diff --git a/example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc9_file.txt b/example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc9_file.txt
similarity index 100%
rename from example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc9_file.txt
rename to example/thread_pool/test/SynTest/Tests/Psi/psi_files/tc9_file.txt
diff --git a/example/thread-pool/test/SynTest/system.cfg b/example/thread_pool/test/SynTest/system.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/system.cfg
rename to example/thread_pool/test/SynTest/system.cfg
diff --git a/example/thread-pool/test/SynTest/tests_psi.cfg b/example/thread_pool/test/SynTest/tests_psi.cfg
similarity index 100%
rename from example/thread-pool/test/SynTest/tests_psi.cfg
rename to example/thread_pool/test/SynTest/tests_psi.cfg
diff --git a/example/thread-pool/thread.c b/example/thread_pool/thread.c
similarity index 96%
rename from example/thread-pool/thread.c
rename to example/thread_pool/thread.c
index 6a6c1bd..be41cfc 100644
--- a/example/thread-pool/thread.c
+++ b/example/thread_pool/thread.c
@@ -28,8 +28,6 @@
 #include "thread.h"
 #include "ts/ink_defs.h"
 
-#define DBGTAG "xthread"
-
 struct timespec tp1;
 struct timespec tp2;
 
@@ -77,7 +75,7 @@ add_to_queue(Queue *q, void *data)
     TSMutexUnlock(q->mutex);
 
     if (n > MAX_JOBS_ALARM) {
-      TSError("[thread_pool] Warning:Too many jobs in plugin thread pool queue (%d). Maximum allowed is %d", n, MAX_JOBS_ALARM);
+      TSError("[%s] Warning:Too many jobs in plugin thread pool queue (%d). Maximum allowed is %d", PLUGIN_NAME, n, MAX_JOBS_ALARM);
     }
   }
 }
diff --git a/example/thread-pool/thread.h b/example/thread_pool/thread.h
similarity index 98%
rename from example/thread-pool/thread.h
rename to example/thread_pool/thread.h
index 54df3ec..1e6ed99 100644
--- a/example/thread-pool/thread.h
+++ b/example/thread_pool/thread.h
@@ -24,6 +24,8 @@
 #ifndef _THREAD_H_
 #define _THREAD_H_
 
+#define PLUGIN_NAME "psi"
+
 #define MAGIC_ALIVE 0xfeedbabe
 #define MAGIC_DEAD 0xdeadbeef
 
diff --git a/example/txn-data-sink/txn-data-sink.c b/example/txn_data_sink/txn_data_sink.c
similarity index 98%
rename from example/txn-data-sink/txn-data-sink.c
rename to example/txn_data_sink/txn_data_sink.c
index 3f22fa1..60d10de 100644
--- a/example/txn-data-sink/txn-data-sink.c
+++ b/example/txn_data_sink/txn_data_sink.c
@@ -36,8 +36,8 @@
 #define __STDC_FORMAT_MACROS 1
 #include <inttypes.h>
 
-#define PLUGIN_NAME "txn-data-sink"
-#define PCP "[" PLUGIN_NAME "]"
+#define PLUGIN_NAME "txn_data_sink"
+#define PCP "[" PLUGIN_NAME "] "
 
 // Activate the data sink if this field is present in the request.
 static const char FLAG_MIME_FIELD[] = "TS-Agent";
diff --git a/example/version/version.c b/example/version/version.c
index 1b82ca9..525c241 100644
--- a/example/version/version.c
+++ b/example/version/version.c
@@ -1,6 +1,6 @@
 /** @file
 
-  an example plugin showing off how to use versioning
+  An example plugin showing off how to use versioning.
 
   @section license License
 
@@ -24,6 +24,8 @@
 #include <stdio.h>
 #include <ts/ts.h>
 
+#define PLUGIN_NAME "version"
+
 void
 TSPluginInit(int argc, const char *argv[])
 {
@@ -33,7 +35,7 @@ TSPluginInit(int argc, const char *argv[])
   // Get the version:
   const char *ts_version = TSTrafficServerVersionGet();
   if (!ts_version) {
-    TSError("[version] Can't get Traffic Server verion.\n");
+    TSError("[%s] Can't get Traffic Server verion.", PLUGIN_NAME);
     return;
   }
 
@@ -43,14 +45,14 @@ TSPluginInit(int argc, const char *argv[])
   int patch_ts_version = 0;
 
   if (sscanf(ts_version, "%d.%d.%d", &major_ts_version, &minor_ts_version, &patch_ts_version) != 3) {
-    TSError("[version] Can't extract verions.\n");
+    TSError("[%s] Can't extract verions.", PLUGIN_NAME);
     return;
   }
 
   TSPluginRegistrationInfo info;
-  info.plugin_name   = "version-plugin";
-  info.vendor_name   = "MyCompany";
-  info.support_email = "ts-api-support@MyCompany.com";
+  info.plugin_name   = PLUGIN_NAME;
+  info.vendor_name   = "Apache Software Foundation";
+  info.support_email = "dev@trafficserver.apache.org";
 
 // partial compilation
 #if (TS_VERSION_NUMBER < 3000000)
@@ -60,9 +62,8 @@ TSPluginInit(int argc, const char *argv[])
 #else
   if (TSPluginRegister(&info) != TS_SUCCESS) {
 #endif
-    TSError("[version] Plugin registration failed. \n");
+    TSError("[%s] Plugin registration failed.", PLUGIN_NAME);
   }
 
-  TSDebug("debug-version-plugin", "Running in Apache Traffic Server: v%d.%d.%d", major_ts_version, minor_ts_version,
-          patch_ts_version);
+  TSDebug(PLUGIN_NAME, "Running in Apache Traffic Server: v%d.%d.%d", major_ts_version, minor_ts_version, patch_ts_version);
 }
diff --git a/iocore/cache/Cache.cc b/iocore/cache/Cache.cc
index daa6ea9..6b5b3cc 100644
--- a/iocore/cache/Cache.cc
+++ b/iocore/cache/Cache.cc
@@ -2215,7 +2215,7 @@ upgrade_doc_version(Ptr<IOBufferData> &buf)
         char *dst;
         char *hdr_limit = doc->data();
         HTTPInfo::FragOffset *frags =
-          reinterpret_cast<HTTPInfo::FragOffset *>(static_cast<char *>(buf->data()) + cache_bc::sizeofDoc_v23);
+          reinterpret_cast<HTTPInfo::FragOffset *>(static_cast<char *>(buf->data()) + sizeof(cache_bc::Doc_v23));
         int frag_count      = doc->_flen / sizeof(HTTPInfo::FragOffset);
         size_t n            = 0;
         size_t content_size = doc->data_len();
@@ -2229,10 +2229,10 @@ upgrade_doc_version(Ptr<IOBufferData> &buf)
 
         src = buf->data();
         dst = d_buf->data();
-        memcpy(dst, src, sizeofDoc);
-        src += sizeofDoc + doc->_flen;
-        dst += sizeofDoc;
-        n -= sizeofDoc;
+        memcpy(dst, src, sizeof(Doc));
+        src += sizeof(Doc) + doc->_flen;
+        dst += sizeof(Doc);
+        n -= sizeof(Doc);
 
         // We copy the fragment table iff there is a fragment table and there is only one alternate.
         if (frag_count > 0 && cache_bc::HTTPInfo_v21::marshalled_length(src) > doc->hlen) {
@@ -2247,7 +2247,7 @@ upgrade_doc_version(Ptr<IOBufferData> &buf)
           // Must update new Doc::len and Doc::hlen
           // dst points at the first byte of the content, or one past the last byte of the alt header.
           d_doc->len  = (dst - reinterpret_cast<char *>(d_doc)) + content_size;
-          d_doc->hlen = (dst - reinterpret_cast<char *>(d_doc)) - sizeofDoc;
+          d_doc->hlen = (dst - reinterpret_cast<char *>(d_doc)) - sizeof(Doc);
           buf         = d_buf; // replace original buffer with new buffer.
         } else {
           zret = false;
@@ -3366,7 +3366,7 @@ HTTPInfo_v21::copy_and_upgrade_unmarshalled_to_v23(char *&dst, char *&src, size_
   // Extra data is fragment table - set that if we have it.
   if (n_frags) {
     static size_t const IFT_SIZE = HTTPCacheAlt_v23::N_INTEGRAL_FRAG_OFFSETS * sizeof(FragOffset);
-    size_t ift_actual            = min(n_frags, HTTPCacheAlt_v23::N_INTEGRAL_FRAG_OFFSETS) * sizeof(FragOffset);
+    size_t ift_actual            = std::min(n_frags, HTTPCacheAlt_v23::N_INTEGRAL_FRAG_OFFSETS) * sizeof(FragOffset);
 
     if (length < (HTTP_ALT_MARSHAL_SIZE + n_frags * sizeof(FragOffset) - IFT_SIZE)) {
       return false; // can't place fragment table.
diff --git a/iocore/cache/CacheDir.cc b/iocore/cache/CacheDir.cc
index 2616d81..01c76ef 100644
--- a/iocore/cache/CacheDir.cc
+++ b/iocore/cache/CacheDir.cc
@@ -605,7 +605,7 @@ dir_insert(const CacheKey *key, Vol *d, Dir *to_part)
   ink_assert(d->mutex->thread_holding == this_ethread());
   int s  = key->slice32(0) % d->segments, l;
   int bi = key->slice32(1) % d->buckets;
-  ink_assert(dir_approx_size(to_part) <= MAX_FRAG_SIZE + sizeofDoc);
+  ink_assert(dir_approx_size(to_part) <= MAX_FRAG_SIZE + sizeof(Doc));
   Dir *seg = dir_segment(s, d);
   Dir *e   = nullptr;
   Dir *b   = dir_bucket(bi, seg);
@@ -671,7 +671,7 @@ dir_overwrite(const CacheKey *key, Vol *d, Dir *dir, Dir *overwrite, bool must_o
   Vol *vol = d;
   CHECK_DIR(d);
 
-  ink_assert((unsigned int)dir_approx_size(dir) <= (unsigned int)(MAX_FRAG_SIZE + sizeofDoc)); // XXX - size should be unsigned
+  ink_assert((unsigned int)dir_approx_size(dir) <= (unsigned int)(MAX_FRAG_SIZE + sizeof(Doc))); // XXX - size should be unsigned
 Lagain:
   // find entry to overwrite
   e = b;
diff --git a/iocore/cache/CacheRead.cc b/iocore/cache/CacheRead.cc
index b9a6ccd..0ea053d 100644
--- a/iocore/cache/CacheRead.cc
+++ b/iocore/cache/CacheRead.cc
@@ -1094,10 +1094,10 @@ CacheVC::openReadStartHead(int event, Event *e)
             }
           }
           Note("OpenReadHead failed for cachekey %X : vector inconsistency - "
-               "unmarshalled %d expecting %d in %d (base=%d, ver=%d:%d) "
+               "unmarshalled %d expecting %d in %d (base=%zu, ver=%d:%d) "
                "- vector n=%d size=%d"
                "first alt=%d[%s]",
-               key.slice32(0), uml, doc->hlen, doc->len, sizeofDoc, doc->v_major, doc->v_minor, vector.count(), alt_length,
+               key.slice32(0), uml, doc->hlen, doc->len, sizeof(Doc), doc->v_major, doc->v_minor, vector.count(), alt_length,
                alt->m_magic,
                (CACHE_ALT_MAGIC_ALIVE == alt->m_magic ?
                   "alive" :
diff --git a/iocore/cache/CacheWrite.cc b/iocore/cache/CacheWrite.cc
index 5871980..d633799 100644
--- a/iocore/cache/CacheWrite.cc
+++ b/iocore/cache/CacheWrite.cc
@@ -190,9 +190,9 @@ CacheVC::handleWrite(int event, Event * /* e ATS_UNUSED */)
 
   set_agg_write_in_progress();
   POP_HANDLER;
-  agg_len = vol->round_to_approx_size(write_len + header_len + frag_len + sizeofDoc);
+  agg_len = vol->round_to_approx_size(write_len + header_len + frag_len + sizeof(Doc));
   vol->agg_todo_size += agg_len;
-  bool agg_error = (agg_len > AGG_SIZE || header_len + sizeofDoc > MAX_FRAG_SIZE ||
+  bool agg_error = (agg_len > AGG_SIZE || header_len + sizeof(Doc) > MAX_FRAG_SIZE ||
                     (!f.readers && (vol->agg_todo_size > cache_config_agg_write_backlog + AGG_SIZE) && write_len));
 #ifdef CACHE_AGG_FAIL_RATE
   agg_error = agg_error || ((uint32_t)mutex->thread_holding->generator.random() < (uint32_t)(UINT_MAX * CACHE_AGG_FAIL_RATE));
@@ -747,8 +747,8 @@ agg_copy(char *p, CacheVC *vc)
     Doc *doc                   = (Doc *)p;
     IOBufferBlock *res_alt_blk = nullptr;
 
-    uint32_t len = vc->write_len + vc->header_len + vc->frag_len + sizeofDoc;
-    ink_assert(vc->frag_type != CACHE_FRAG_TYPE_HTTP || len != sizeofDoc);
+    uint32_t len = vc->write_len + vc->header_len + vc->frag_len + sizeof(Doc);
+    ink_assert(vc->frag_type != CACHE_FRAG_TYPE_HTTP || len != sizeof(Doc));
     ink_assert(vol->round_to_approx_size(len) == vc->agg_len);
     // update copy of directory entry for this document
     dir_set_approx_size(&vc->dir, vc->agg_len);
@@ -792,7 +792,7 @@ agg_copy(char *p, CacheVC *vc)
     if (vc->f.rewrite_resident_alt) {
       ink_assert(vc->f.use_first_key);
       Doc *res_doc   = (Doc *)vc->first_buf->data();
-      res_alt_blk    = new_IOBufferBlock(vc->first_buf, res_doc->data_len(), sizeofDoc + res_doc->hlen);
+      res_alt_blk    = new_IOBufferBlock(vc->first_buf, res_doc->data_len(), sizeof(Doc) + res_doc->hlen);
       doc->key       = res_doc->key;
       doc->total_len = res_doc->data_len();
     }
@@ -1303,7 +1303,7 @@ CacheVC::openWriteClose(int event, Event *e)
         return openWriteCloseDir(event, e);
       }
     }
-    if (length && (fragment || length > MAX_FRAG_SIZE)) {
+    if (length && (fragment || length > static_cast<int>(MAX_FRAG_SIZE))) {
       SET_HANDLER(&CacheVC::openWriteCloseDataDone);
       write_len = length;
       if (write_len > MAX_FRAG_SIZE) {
@@ -1370,7 +1370,7 @@ CacheVC::openWriteWriteDone(int event, Event *e)
 static inline int
 target_fragment_size()
 {
-  return cache_config_target_fragment_size - sizeofDoc;
+  return cache_config_target_fragment_size - sizeof(Doc);
 }
 
 int
@@ -1406,7 +1406,7 @@ Lagain:
     avail -= (towrite - ntodo);
     towrite = ntodo;
   }
-  if (towrite > MAX_FRAG_SIZE) {
+  if (towrite > static_cast<int>(MAX_FRAG_SIZE)) {
     avail -= (towrite - MAX_FRAG_SIZE);
     towrite = MAX_FRAG_SIZE;
   }
diff --git a/iocore/cache/P_CacheBC.h b/iocore/cache/P_CacheBC.h
index 5a19ce3..1af43c1 100644
--- a/iocore/cache/P_CacheBC.h
+++ b/iocore/cache/P_CacheBC.h
@@ -119,21 +119,20 @@ struct Doc_v23 {
   size_t data_len();
 };
 
-static size_t const sizeofDoc_v23 = sizeof(Doc_v23);
 char *
 Doc_v23::data()
 {
-  return reinterpret_cast<char *>(this) + sizeofDoc_v23 + _flen + hlen;
+  return reinterpret_cast<char *>(this) + sizeof(Doc_v23) + _flen + hlen;
 }
 size_t
 Doc_v23::data_len()
 {
-  return len - sizeofDoc_v23 - hlen;
+  return len - sizeof(Doc_v23) - hlen;
 }
 char *
 Doc_v23::hdr()
 {
-  return reinterpret_cast<char *>(this) + sizeofDoc_v23;
+  return reinterpret_cast<char *>(this) + sizeof(Doc_v23);
 }
 
 } // namespace cache_bc
diff --git a/iocore/cache/P_CacheVol.h b/iocore/cache/P_CacheVol.h
index 1fbd337..b9e5490 100644
--- a/iocore/cache/P_CacheVol.h
+++ b/iocore/cache/P_CacheVol.h
@@ -41,7 +41,7 @@
 #define MAX_VOL_SIZE ((off_t)512 * 1024 * 1024 * 1024 * 1024)
 #define STORE_BLOCKS_PER_CACHE_BLOCK (STORE_BLOCK_SIZE / CACHE_BLOCK_SIZE)
 #define MAX_VOL_BLOCKS (MAX_VOL_SIZE / CACHE_BLOCK_SIZE)
-#define MAX_FRAG_SIZE (AGG_SIZE - sizeofDoc) // true max
+#define MAX_FRAG_SIZE (AGG_SIZE - sizeof(Doc)) // true max
 #define LEAVE_FREE DEFAULT_MAX_BUFFER_SIZE
 #define PIN_SCAN_EVERY 16 // scan every 1/16 of disk
 #define VOL_HASH_TABLE_SIZE 32707
@@ -52,8 +52,8 @@
 #define RECOVERY_SIZE EVACUATION_SIZE                // 8MB
 #define AIO_NOT_IN_PROGRESS 0
 #define AIO_AGG_WRITE_IN_PROGRESS -1
-#define AUTO_SIZE_RAM_CACHE -1                             // 1-1 with directory size
-#define DEFAULT_TARGET_FRAGMENT_SIZE (1048576 - sizeofDoc) // 1MB
+#define AUTO_SIZE_RAM_CACHE -1                               // 1-1 with directory size
+#define DEFAULT_TARGET_FRAGMENT_SIZE (1048576 - sizeof(Doc)) // 1MB
 
 #define dir_offset_evac_bucket(_o) (_o / (EVACUATION_BUCKET_SIZE / CACHE_BLOCK_SIZE))
 #define dir_evac_bucket(_e) dir_offset_evac_bucket(dir_offset(_e))
@@ -66,8 +66,6 @@
 #define DOC_CORRUPT ((uint32_t)0xDEADBABE)
 #define DOC_NO_CHECKSUM ((uint32_t)0xA0B0C0D0)
 
-#define sizeofDoc (((uint32_t)(uintptr_t) & ((Doc *)0)->checksum) + (uint32_t)sizeof(uint32_t))
-
 struct Cache;
 struct Vol;
 struct CacheDisk;
@@ -277,7 +275,6 @@ struct CacheVol {
 };
 
 // Note : hdr() needs to be 8 byte aligned.
-// If you change this, change sizeofDoc above
 struct Doc {
   uint32_t magic;        // DOC_MAGIC
   uint32_t len;          // length of this fragment (including hlen & sizeof(Doc), unrounded)
@@ -395,13 +392,13 @@ vol_relative_length(Vol *v, off_t start_offset)
 TS_INLINE uint32_t
 Doc::prefix_len()
 {
-  return sizeofDoc + hlen;
+  return sizeof(Doc) + hlen;
 }
 
 TS_INLINE uint32_t
 Doc::data_len()
 {
-  return len - sizeofDoc - hlen;
+  return len - sizeof(Doc) - hlen;
 }
 
 TS_INLINE int
@@ -413,7 +410,7 @@ Doc::single_fragment()
 TS_INLINE char *
 Doc::hdr()
 {
-  return reinterpret_cast<char *>(this) + sizeofDoc;
+  return reinterpret_cast<char *>(this) + sizeof(Doc);
 }
 
 TS_INLINE char *
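The cache changes above drop the hand-rolled sizeofDoc macro, which computed the offset of the checksum member plus sizeof(uint32_t), in favor of plain sizeof(Doc). The two agree only when checksum is the last member and the struct has no trailing padding; a small standalone check of that assumption (ExampleDoc is a stand-in, not the real Doc layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ExampleDoc {
      uint32_t magic;
      uint32_t len;
      uint32_t checksum; /* last member, as the old macro assumed */
    };

    /* Old style: offset of 'checksum' plus its size, i.e. the bytes up to and including it. */
    #define SIZEOF_EXAMPLE_DOC (offsetof(struct ExampleDoc, checksum) + sizeof(uint32_t))

    int
    main(void)
    {
      /* Holds only when 'checksum' is last and there is no tail padding. */
      assert(SIZEOF_EXAMPLE_DOC == sizeof(struct ExampleDoc));
      return 0;
    }
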
diff --git a/iocore/cache/Store.cc b/iocore/cache/Store.cc
index cb41c26..44cc70d 100644
--- a/iocore/cache/Store.cc
+++ b/iocore/cache/Store.cc
@@ -392,20 +392,18 @@ Store::read_config()
       }
     }
 
-    char *pp = Layout::get()->relative(path);
+    std::string pp = Layout::get()->relative(path);
 
     ns = new Span;
-    Debug("cache_init", "Store::read_config - ns = new Span; ns->init(\"%s\",%" PRId64 "), forced volume=%d%s%s", pp, size,
+    Debug("cache_init", "Store::read_config - ns = new Span; ns->init(\"%s\",%" PRId64 "), forced volume=%d%s%s", pp.c_str(), size,
           volume_num, seed ? " id=" : "", seed ? seed : "");
-    if ((err = ns->init(pp, size))) {
-      RecSignalWarning(REC_SIGNAL_SYSTEM_ERROR, "could not initialize storage \"%s\" [%s]", pp, err);
-      Debug("cache_init", "Store::read_config - could not initialize storage \"%s\" [%s]", pp, err);
+    if ((err = ns->init(pp.c_str(), size))) {
+      RecSignalWarning(REC_SIGNAL_SYSTEM_ERROR, "could not initialize storage \"%s\" [%s]", pp.c_str(), err);
+      Debug("cache_init", "Store::read_config - could not initialize storage \"%s\" [%s]", pp.c_str(), err);
       delete ns;
-      ats_free(pp);
       continue;
     }
 
-    ats_free(pp);
     n_dsstore++;
 
     // Set side values if present.
@@ -575,7 +573,7 @@ Span::init(const char *path, int64_t size)
 
   // The actual size of a span always trumps the configured size.
   if (size > 0 && this->size() != size) {
-    int64_t newsz = MIN(size, this->size());
+    int64_t newsz = std::min(size, this->size());
 
     Note("cache %s '%s' is %" PRId64 " bytes, but the configured size is %" PRId64 " bytes, using the minimum",
          span_file_typename(sbuf.st_mode), path, this->size(), size);
diff --git a/iocore/dns/DNS.cc b/iocore/dns/DNS.cc
index e067682..3d3bcef 100644
--- a/iocore/dns/DNS.cc
+++ b/iocore/dns/DNS.cc
@@ -56,8 +56,12 @@ char *dns_local_ipv6                 = nullptr;
 char *dns_local_ipv4                 = nullptr;
 int dns_thread                       = 0;
 int dns_prefer_ipv6                  = 0;
+DNS_CONN_MODE dns_conn_mode          = DNS_CONN_MODE::UDP_ONLY;
+
 namespace
 {
+const int tcp_data_length_offset = 2;
+
 // Currently only used for A and AAAA.
 inline const char *
 QtypeName(int qtype)
@@ -83,10 +87,9 @@ ClassAllocator<HostEnt> dnsBufAllocator("dnsBufAllocator", 2);
 static bool dns_process(DNSHandler *h, HostEnt *ent, int len);
 static DNSEntry *get_dns(DNSHandler *h, uint16_t id);
 // returns true when e is done
-static void dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry);
-static void write_dns(DNSHandler *h);
-static bool write_dns_event(DNSHandler *h, DNSEntry *e);
-
+static void dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry, bool tcp_retry = false);
+static void write_dns(DNSHandler *h, bool tcp_retry = false);
+static bool write_dns_event(DNSHandler *h, DNSEntry *e, bool over_tcp = false);
 // "reliable" name to try. need to build up first.
 static int try_servers         = 0;
 static int local_num_entries   = 1;
@@ -111,6 +114,24 @@ ink_get16(const uint8_t *src)
   return dst;
 }
 
+static inline unsigned int
+get_rcode(char *buff)
+{
+  return reinterpret_cast<HEADER *>(buff)->rcode;
+}
+
+static inline unsigned int
+get_rcode(HostEnt *ent)
+{
+  return get_rcode(reinterpret_cast<char *>(ent->buf));
+}
+
+bool
+HostEnt::isNameError()
+{
+  return get_rcode(this) == NXDOMAIN;
+}
+
 void
 HostEnt::free()
 {
@@ -199,6 +220,9 @@ DNSProcessor::start(int, size_t stacksize)
   REC_ReadConfigStringAlloc(dns_local_ipv6, "proxy.config.dns.local_ipv6");
   REC_ReadConfigStringAlloc(dns_resolv_conf, "proxy.config.dns.resolv_conf");
   REC_EstablishStaticConfigInt32(dns_thread, "proxy.config.dns.dedicated_thread");
+  int dns_conn_mode_i = 0;
+  REC_EstablishStaticConfigInt32(dns_conn_mode_i, "proxy.config.dns.connection.mode");
+  dns_conn_mode = static_cast<DNS_CONN_MODE>(dns_conn_mode_i);
 
   if (dns_thread > 0) {
     // TODO: Hmmm, should we just get a single thread some other way?
@@ -424,12 +448,27 @@ DNSEntry::init(const char *x, int len, int qtype_arg, Continuation *acont, DNSPr
 }
 
 /**
+ Open UDP and/or TCP connections based on dns_conn_mode
+ */
+
+void
+DNSHandler::open_cons(sockaddr const *target, bool failed, int icon)
+{
+  if (dns_conn_mode != DNS_CONN_MODE::TCP_ONLY) {
+    open_con(target, failed, icon, false);
+  }
+  if (dns_conn_mode != DNS_CONN_MODE::UDP_ONLY) {
+    open_con(target, failed, icon, true);
+  }
+}
+
+/**
   Open (and close) connections as necessary and also assures that the
   epoll fd struct is properly updated.
 
 */
 void
-DNSHandler::open_con(sockaddr const *target, bool failed, int icon)
+DNSHandler::open_con(sockaddr const *target, bool failed, int icon, bool over_tcp)
 {
   ip_port_text_buffer ip_text;
   PollDescriptor *pd = get_PollDescriptor(dnsProcessor.thread);
@@ -439,22 +478,23 @@ DNSHandler::open_con(sockaddr const *target, bool failed, int icon)
   } else if (!target) {
     target = &ip.sa;
   }
+  DNSConnection &cur_con = over_tcp ? tcpcon[icon] : udpcon[icon];
 
   Debug("dns", "open_con: opening connection %s", ats_ip_nptop(target, ip_text, sizeof ip_text));
 
-  if (con[icon].fd != NO_FD) { // Remove old FD from epoll fd
-    con[icon].eio.stop();
-    con[icon].close();
+  if (cur_con.fd != NO_FD) { // Remove old FD from epoll fd
+    cur_con.eio.stop();
+    cur_con.close();
   }
 
-  if (con[icon].connect(target,
-                        DNSConnection::Options()
-                          .setNonBlockingConnect(true)
-                          .setNonBlockingIo(true)
-                          .setUseTcp(false)
-                          .setBindRandomPort(true)
-                          .setLocalIpv6(&local_ipv6.sa)
-                          .setLocalIpv4(&local_ipv4.sa)) < 0) {
+  if (cur_con.connect(target,
+                      DNSConnection::Options()
+                        .setNonBlockingConnect(true)
+                        .setNonBlockingIo(true)
+                        .setUseTcp(over_tcp)
+                        .setBindRandomPort(true)
+                        .setLocalIpv6(&local_ipv6.sa)
+                        .setLocalIpv4(&local_ipv4.sa)) < 0) {
     Debug("dns", "opening connection %s FAILED for %d", ip_text, icon);
     if (!failed) {
       if (dns_ns_rr) {
@@ -466,10 +506,10 @@ DNSHandler::open_con(sockaddr const *target, bool failed, int icon)
     return;
   } else {
     ns_down[icon] = 0;
-    if (con[icon].eio.start(pd, &con[icon], EVENTIO_READ) < 0) {
+    if (cur_con.eio.start(pd, &cur_con, EVENTIO_READ) < 0) {
       Error("[iocore_dns] open_con: Failed to add %d server to epoll list\n", icon);
     } else {
-      con[icon].num = icon;
+      cur_con.num = icon;
       Debug("dns", "opening connection %s SUCCEEDED for %d", ip_text, icon);
     }
   }
@@ -518,14 +558,14 @@ DNSHandler::startEvent(int /* event ATS_UNUSED */, Event *e)
         ip_port_text_buffer buff;
         sockaddr *sa = &m_res->nsaddr_list[i].sa;
         if (ats_is_ip(sa)) {
-          open_con(sa, false, n_con);
+          open_cons(sa, false, n_con);
           ++n_con;
           Debug("dns_pas", "opened connection to %s, n_con = %d", ats_ip_nptop(sa, buff, sizeof(buff)), n_con);
         }
       }
       dns_ns_rr_init_down = 0;
     } else {
-      open_con(nullptr); // use current target address.
+      open_cons(nullptr); // use current target address.
       n_con = 1;
     }
     e->ethread->schedule_every(this, DNS_PERIOD);
@@ -548,7 +588,7 @@ DNSHandler::startEvent_sdns(int /* event ATS_UNUSED */, Event *e)
   this->validate_ip();
 
   SET_HANDLER(&DNSHandler::mainEvent);
-  open_con(&ip.sa, false, n_con);
+  open_cons(&ip.sa, false, n_con);
   ++n_con; // TODO should n_con be zeroed?
 
   e->schedule_every(DNS_PERIOD);
@@ -556,10 +596,15 @@ DNSHandler::startEvent_sdns(int /* event ATS_UNUSED */, Event *e)
 }
 
 static inline int
-_ink_res_mkquery(ink_res_state res, char *qname, int qtype, char *buffer)
+_ink_res_mkquery(ink_res_state res, char *qname, int qtype, char *buffer, bool over_tcp = false)
 {
-  int r = ink_res_mkquery(res, QUERY, qname, C_IN, qtype, nullptr, 0, nullptr, (unsigned char *)buffer, MAX_DNS_PACKET_LEN);
-  return r;
+  int offset = over_tcp ? tcp_data_length_offset : 0;
+  int r =
+    ink_res_mkquery(res, QUERY, qname, C_IN, qtype, nullptr, 0, nullptr, (unsigned char *)buffer + offset, MAX_DNS_PACKET_LEN);
+  if (over_tcp) {
+    NS_PUT16(r, buffer);
+  }
+  return r + offset;
 }
 
 void
@@ -577,17 +622,23 @@ DNSHandler::retry_named(int ndx, ink_hrtime t, bool reopen)
   if (reopen && ((t - last_primary_reopen) > DNS_PRIMARY_REOPEN_PERIOD)) {
     Debug("dns", "retry_named: reopening DNS connection for index %d", ndx);
     last_primary_reopen = t;
-    con[ndx].close();
-    open_con(&m_res->nsaddr_list[ndx].sa, true, ndx);
+    if (dns_conn_mode != DNS_CONN_MODE::TCP_ONLY) {
+      udpcon[ndx].close();
+    }
+    if (dns_conn_mode != DNS_CONN_MODE::UDP_ONLY) {
+      tcpcon[ndx].close();
+    }
+    open_cons(&m_res->nsaddr_list[ndx].sa, true, ndx);
   }
-
+  bool over_tcp = dns_conn_mode == DNS_CONN_MODE::TCP_ONLY;
+  int con_fd    = over_tcp ? tcpcon[ndx].fd : udpcon[ndx].fd;
   char buffer[MAX_DNS_PACKET_LEN];
   Debug("dns", "trying to resolve '%s' from DNS connection, ndx %d", try_server_names[try_servers], ndx);
-  int r       = _ink_res_mkquery(m_res, try_server_names[try_servers], T_A, buffer);
+  int r       = _ink_res_mkquery(m_res, try_server_names[try_servers], T_A, buffer, over_tcp);
   try_servers = (try_servers + 1) % countof(try_server_names);
   ink_assert(r >= 0);
   if (r >= 0) { // looking for a bounce
-    int res = socketManager.send(con[ndx].fd, buffer, r, 0);
+    int res = socketManager.send(con_fd, buffer, r, 0);
     Debug("dns", "ping result = %d", res);
   }
 }
@@ -599,14 +650,15 @@ DNSHandler::try_primary_named(bool reopen)
   if (reopen && ((t - last_primary_reopen) > DNS_PRIMARY_REOPEN_PERIOD)) {
     Debug("dns", "try_primary_named: reopening primary DNS connection");
     last_primary_reopen = t;
-    open_con(&ip.sa, true, 0);
+    open_cons(&ip.sa, true, 0);
   }
   if ((t - last_primary_retry) > DNS_PRIMARY_RETRY_PERIOD) {
     char buffer[MAX_DNS_PACKET_LEN];
-
+    bool over_tcp      = dns_conn_mode == DNS_CONN_MODE::TCP_ONLY;
+    int con_fd         = over_tcp ? tcpcon[0].fd : udpcon[0].fd;
     last_primary_retry = t;
     Debug("dns", "trying to resolve '%s' from primary DNS connection", try_server_names[try_servers]);
-    int r = _ink_res_mkquery(m_res, try_server_names[try_servers], T_A, buffer);
+    int r = _ink_res_mkquery(m_res, try_server_names[try_servers], T_A, buffer, over_tcp);
     // if try_server_names[] is not full, round-robin within the
     // filled entries.
     if (local_num_entries < DEFAULT_NUM_TRY_SERVER) {
@@ -616,7 +668,7 @@ DNSHandler::try_primary_named(bool reopen)
     }
     ink_assert(r >= 0);
     if (r >= 0) { // looking for a bounce
-      int res = socketManager.send(con[0].fd, buffer, r, 0);
+      int res = socketManager.send(con_fd, buffer, r, 0);
       Debug("dns", "ping result = %d", res);
     }
   }
@@ -662,7 +714,7 @@ DNSHandler::failover()
       target.setToLoopback(AF_INET);
     }
 
-    open_con(&target.sa, true, name_server);
+    open_cons(&target.sa, true, name_server);
     if (n_con <= name_server) {
       n_con = name_server + 1;
     }
@@ -728,18 +780,6 @@ DNSHandler::rr_failure(int ndx)
   }
 }
 
-static inline unsigned int
-get_rcode(char *buff)
-{
-  return reinterpret_cast<HEADER *>(buff)->rcode;
-}
-
-static inline unsigned int
-get_rcode(HostEnt *ent)
-{
-  return get_rcode(reinterpret_cast<char *>(ent));
-}
-
 static bool
 good_rcode(char *buff)
 {
@@ -752,23 +792,61 @@ DNSHandler::recv_dns(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
   DNSConnection *dnsc = nullptr;
   ip_text_buffer ipbuff1, ipbuff2;
-
+  Ptr<HostEnt> buf;
   while ((dnsc = (DNSConnection *)triggered.dequeue())) {
     while (true) {
+      int res;
       IpEndpoint from_ip;
       socklen_t from_length = sizeof(from_ip);
+      if (dnsc->opt._use_tcp) {
+        if (dnsc->tcp_data.buf_ptr == nullptr) {
+          dnsc->tcp_data.buf_ptr = make_ptr(dnsBufAllocator.alloc());
+        }
+        if (dnsc->tcp_data.total_length == 0) {
+          // read the two-byte, network-order length prefix first
+          res = socketManager.recv(dnsc->fd, &(dnsc->tcp_data.total_length), sizeof(dnsc->tcp_data.total_length), 0);
+          if (res == -EAGAIN) {
+            break;
+          }
+          if (res <= 0) {
+            goto Lerror;
+          }
+          dnsc->tcp_data.total_length = ntohs(dnsc->tcp_data.total_length);
+          if (res != sizeof(dnsc->tcp_data.total_length) || dnsc->tcp_data.total_length > MAX_DNS_PACKET_LEN) {
+            goto Lerror;
+          }
+        }
+        // continue reading data
+        void *buf_start = (char *)dnsc->tcp_data.buf_ptr->buf + dnsc->tcp_data.done_reading;
+        res             = socketManager.recv(dnsc->fd, buf_start, dnsc->tcp_data.total_length - dnsc->tcp_data.done_reading, 0);
+        if (res == -EAGAIN) {
+          break;
+        }
+        if (res <= 0) {
+          goto Lerror;
+        }
+        Debug("dns", "received packet size = %d over TCP", res);
+        dnsc->tcp_data.done_reading += res;
+        if (dnsc->tcp_data.done_reading < dnsc->tcp_data.total_length) {
+          break;
+        }
+        buf = dnsc->tcp_data.buf_ptr;
+        res = dnsc->tcp_data.total_length;
+        dnsc->tcp_data.reset();
+        goto Lsuccess;
+      }
 
       if (!hostent_cache) {
         hostent_cache = dnsBufAllocator.alloc();
       }
-      HostEnt *buf = hostent_cache;
-
-      int res = socketManager.recvfrom(dnsc->fd, buf->buf, MAX_DNS_PACKET_LEN, 0, &from_ip.sa, &from_length);
 
+      res = socketManager.recvfrom(dnsc->fd, hostent_cache->buf, MAX_DNS_PACKET_LEN, 0, &from_ip.sa, &from_length);
+      Debug("dns", "DNSHandler::recv_dns res = [%d]", res);
       if (res == -EAGAIN) {
         break;
       }
       if (res <= 0) {
+      Lerror:
         Debug("dns", "named error: %d", res);
         if (dns_ns_rr) {
           rr_failure(dnsc->num);
@@ -784,11 +862,13 @@ DNSHandler::recv_dns(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
                 ats_ip_ntop(&dnsc->ip.sa, ipbuff2, sizeof ipbuff2));
         continue;
       }
+      buf              = hostent_cache;
       hostent_cache    = nullptr;
       buf->packet_size = res;
       Debug("dns", "received packet size = %d", res);
+    Lsuccess:
       if (dns_ns_rr) {
-        Debug("dns", "round-robin: nameserver %d DNS response code = %d", dnsc->num, get_rcode(buf));
+        Debug("dns", "round-robin: nameserver %d DNS response code = %d", dnsc->num, get_rcode(buf->buf));
         if (good_rcode(buf->buf)) {
           received_one(dnsc->num);
           if (ns_down[dnsc->num]) {
@@ -799,7 +879,7 @@ DNSHandler::recv_dns(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
         }
       } else {
         if (!dnsc->num) {
-          Debug("dns", "primary DNS response code = %d", get_rcode(buf));
+          Debug("dns", "primary DNS response code = %d", get_rcode(buf->buf));
           if (good_rcode(buf->buf)) {
             if (name_server) {
               recover();
@@ -809,8 +889,7 @@ DNSHandler::recv_dns(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
           }
         }
       }
-      Ptr<HostEnt> protect_hostent = make_ptr(buf);
-      if (dns_process(this, buf, res)) {
+      if (dns_process(this, buf.get(), res)) {
         if (dnsc->num == name_server) {
           received_one(name_server);
         }
@@ -908,7 +987,7 @@ get_entry(DNSHandler *h, char *qname, int qtype)
 
 /** Write up to dns_max_dns_in_flight entries. */
 static void
-write_dns(DNSHandler *h)
+write_dns(DNSHandler *h, bool tcp_retry)
 {
   ProxyMutex *mutex = h->mutex.get();
   DNS_INCREMENT_DYN_STAT(dns_total_lookups_stat);
@@ -928,6 +1007,7 @@ write_dns(DNSHandler *h)
     return;
   }
   h->in_write_dns = true;
+  bool over_tcp   = (dns_conn_mode == DNS_CONN_MODE::TCP_ONLY) || ((dns_conn_mode == DNS_CONN_MODE::TCP_RETRY) && tcp_retry);
   // Debug("dns", "in_flight: %d, dns_max_dns_in_flight: %d", h->in_flight, dns_max_dns_in_flight);
   if (h->in_flight < dns_max_dns_in_flight) {
     DNSEntry *e = h->entries.head;
@@ -940,7 +1020,7 @@ write_dns(DNSHandler *h)
             h->name_server = (h->name_server + 1) % max_nscount;
           } while (h->ns_down[h->name_server] && h->name_server != ns_start);
         }
-        if (h->ns_down[h->name_server] || !write_dns_event(h, e)) {
+        if (h->ns_down[h->name_server] || !write_dns_event(h, e, over_tcp)) {
           break;
         }
       }
@@ -993,31 +1073,31 @@ DNSHandler::get_query_id()
 
 */
 static bool
-write_dns_event(DNSHandler *h, DNSEntry *e)
+write_dns_event(DNSHandler *h, DNSEntry *e, bool over_tcp)
 {
   ProxyMutex *mutex = h->mutex.get();
-  union {
-    HEADER _h;
-    char _b[MAX_DNS_PACKET_LEN];
-  } blob;
-  int r = 0;
+  char buffer[MAX_DNS_PACKET_LEN];
+  int offset     = over_tcp ? tcp_data_length_offset : 0;
+  HEADER *header = (HEADER *)(buffer + offset);
+  int r          = 0;
 
-  if ((r = _ink_res_mkquery(h->m_res, e->qname, e->qtype, blob._b)) <= 0) {
+  if ((r = _ink_res_mkquery(h->m_res, e->qname, e->qtype, buffer, over_tcp)) <= 0) {
     Debug("dns", "cannot build query: %s", e->qname);
     dns_result(h, e, nullptr, false);
     return true;
   }
 
   uint16_t i = h->get_query_id();
-  blob._h.id = htons(i);
+  header->id = htons(i);
   if (e->id[dns_retries - e->retries] >= 0) {
     // clear previous id in case named was switched or domain was expanded
     h->release_query_id(e->id[dns_retries - e->retries]);
   }
   e->id[dns_retries - e->retries] = i;
-  Debug("dns", "send query (qtype=%d) for %s to fd %d", e->qtype, e->qname, h->con[h->name_server].fd);
+  int con_fd                      = over_tcp ? h->tcpcon[h->name_server].fd : h->udpcon[h->name_server].fd;
+  Debug("dns", "send query (qtype=%d) for %s to fd %d", e->qtype, e->qname, con_fd);
 
-  int s = socketManager.send(h->con[h->name_server].fd, blob._b, r, 0);
+  int s = socketManager.send(con_fd, buffer, r, 0);
   if (s != r) {
     Debug("dns", "send() failed: qname = %s, %d != %d, nameserver= %d", e->qname, s, r, h->name_server);
     // changed if condition from 'r < 0' to 's < 0' - 8/2001 pas
@@ -1152,12 +1232,13 @@ DNSProcessor::getby(const char *x, int len, int type, Continuation *cont, Option
   is a retry-able and we have retries left.
 */
 static void
-dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
+dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry, bool tcp_retry)
 {
   ProxyMutex *mutex = h->mutex.get();
   bool cancelled    = (e->action.cancelled ? true : false);
+  retry             = retry || tcp_retry;
 
-  if (!ent && !cancelled) {
+  if ((!ent || !ent->good) && !cancelled) {
     // try to retry operation
     if (retry && e->retries) {
       Debug("dns", "doing retry for %s", e->qname);
@@ -1165,7 +1246,7 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
       DNS_INCREMENT_DYN_STAT(dns_retries_stat);
 
       --(e->retries);
-      write_dns(h);
+      write_dns(h, tcp_retry);
       return;
     } else if (e->domains && *e->domains) {
       do {
@@ -1181,7 +1262,7 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
           ++(e->domains);
           e->retries = dns_retries;
           Debug("dns", "new name = %s retries = %d", e->qname, e->retries);
-          write_dns(h);
+          write_dns(h, tcp_retry);
 
           return;
         }
@@ -1193,7 +1274,7 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
       e->qname[e->qname_len] = 0;
       if (!strchr(e->qname, '.') && !e->last) {
         e->last = true;
-        write_dns(h);
+        write_dns(h, tcp_retry);
         return;
       }
     }
@@ -1205,7 +1286,7 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
     ent = nullptr;
   }
   if (!cancelled) {
-    if (!ent) {
+    if (!ent || !ent->good) {
       DNS_SUM_DYN_STAT(dns_fail_time_stat, Thread::get_hrtime() - e->submit_time);
     } else {
       DNS_SUM_DYN_STAT(dns_success_time_stat, Thread::get_hrtime() - e->submit_time);
@@ -1218,13 +1299,13 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
       ip_text_buffer buff;
       const char *ptr    = "<none>";
       const char *result = "FAIL";
-      if (ent) {
+      if (ent && ent->good) {
         result = "SUCCESS";
         ptr    = inet_ntop(e->qtype == T_AAAA ? AF_INET6 : AF_INET, ent->ent.h_addr_list[0], buff, sizeof(buff));
       }
       Debug("dns", "%s result for %s = %s retry %d", result, e->qname, ptr, retry);
     } else {
-      if (ent) {
+      if (ent && ent->good) {
         Debug("dns", "SUCCESS result for %s = %s af=%d retry %d", e->qname, ent->ent.h_name, ent->ent.h_addrtype, retry);
       } else {
         Debug("dns", "FAIL result for %s = <not found> retry %d", e->qname, retry);
@@ -1232,7 +1313,7 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry)
     }
   }
 
-  if (ent) {
+  if (ent && ent->good) {
     DNS_INCREMENT_DYN_STAT(dns_lookup_success_stat);
   } else {
     DNS_INCREMENT_DYN_STAT(dns_lookup_fail_stat);
@@ -1331,6 +1412,7 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
   HEADER *h         = (HEADER *)(buf->buf);
   DNSEntry *e       = get_dns(handler, (uint16_t)ntohs(h->id));
   bool retry        = false;
+  bool tcp_retry    = false;
   bool server_ok    = true;
   uint32_t temp_ttl = 0;
 
@@ -1350,6 +1432,13 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
 
   DNS_SUM_DYN_STAT(dns_response_time_stat, Thread::get_hrtime() - e->send_time);
 
+  // Retry the query over TCP when the response has the TC (truncated) bit set
+  if (dns_conn_mode == DNS_CONN_MODE::TCP_RETRY && h->tc == 1) {
+    Debug("dns", "Retrying DNS query over TCP for [%s]", e->qname);
+    tcp_retry = true;
+    goto Lerror;
+  }
+
   if (h->rcode != NOERROR || !h->ancount) {
     Debug("dns", "received rcode = %d", h->rcode);
     switch (h->rcode) {
@@ -1391,12 +1480,12 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
     u_char *cp        = ((u_char *)h) + HFIXEDSZ;
     u_char *eom       = (u_char *)h + len;
     int n;
-    ink_assert(buf->srv_hosts.srv_host_count == 0 && buf->srv_hosts.srv_hosts_length == 0);
-    buf->srv_hosts.srv_host_count   = 0;
+    ink_assert(buf->srv_hosts.hosts.size() == 0 && buf->srv_hosts.srv_hosts_length == 0);
+    buf->srv_hosts.hosts.clear();
     buf->srv_hosts.srv_hosts_length = 0;
-    unsigned &num_srv               = buf->srv_hosts.srv_host_count;
     int rname_len                   = -1;
 
+    Debug("dns", "Got %d DNS records for [%s]", ancount, e->qname);
     //
     // Expand name
     //
@@ -1568,7 +1657,7 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
           buflen -= n;
         }
       } else if (type == T_SRV) {
-        if (num_srv >= HOST_DB_MAX_ROUND_ROBIN_INFO) {
+        if (buf->srv_hosts.hosts.size() >= hostdb_round_robin_max_count) {
           break;
         }
         cp         = here; /* hack */
@@ -1577,30 +1666,31 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
         const unsigned char *srv_off = cp;
         cp += SRV_FIXEDSZ;
         cp += dn_skipname(cp, eom);
-        here     = cp; /* hack */
-        SRV *srv = &buf->srv_hosts.hosts[num_srv];
+        here = cp; /* hack */
+
+        SRV srv;
 
         // expand the name
-        n = ink_dn_expand((u_char *)h, eom, srv_off + SRV_SERVER, (u_char *)srv->host, MAXDNAME);
+        n = ink_dn_expand((u_char *)h, eom, srv_off + SRV_SERVER, (u_char *)srv.host, MAXDNAME);
         if (n < 0) {
           ++error;
           break;
         }
         Debug("dns_srv", "Discovered SRV record [from NS lookup] with cost:%d weight:%d port:%d with host:%s",
-              ink_get16(srv_off + SRV_COST), ink_get16(srv_off + SRV_WEIGHT), ink_get16(srv_off + SRV_PORT), srv->host);
+              ink_get16(srv_off + SRV_COST), ink_get16(srv_off + SRV_WEIGHT), ink_get16(srv_off + SRV_PORT), srv.host);
 
-        srv->port     = ink_get16(srv_off + SRV_PORT);
-        srv->priority = ink_get16(srv_off + SRV_COST);
-        srv->weight   = ink_get16(srv_off + SRV_WEIGHT);
-        srv->host_len = ::strlen(srv->host) + 1;
-        srv->key      = makeHostHash(srv->host);
+        srv.port     = ink_get16(srv_off + SRV_PORT);
+        srv.priority = ink_get16(srv_off + SRV_COST);
+        srv.weight   = ink_get16(srv_off + SRV_WEIGHT);
+        srv.host_len = ::strlen(srv.host) + 1;
+        srv.key      = makeHostHash(srv.host);
 
-        if (srv->host[0] != '\0') {
-          buf->srv_hosts.srv_hosts_length += srv->host_len;
+        if (srv.host[0] != '\0') {
+          buf->srv_hosts.srv_hosts_length += srv.host_len;
         } else {
           continue;
         }
-        ++num_srv;
+        buf->srv_hosts.hosts.push_back(srv);
       } else if (is_addr_query(type)) {
         if (answer) {
           if (n != buf->ent.h_length) {
@@ -1651,13 +1741,15 @@ dns_process(DNSHandler *handler, HostEnt *buf, int len)
         ink_strlcpy((char *)bp, e->qname, sizeof(buf->hostbuf) - (bp - buf->hostbuf));
         buf->ent.h_name = (char *)bp;
       }
+      Debug("dns", "Returning %d DNS records for [%s]", answer, e->qname);
       dns_result(handler, e, buf, retry);
       return server_ok;
     }
   }
 Lerror:;
   DNS_INCREMENT_DYN_STAT(dns_lookup_fail_stat);
-  dns_result(handler, e, nullptr, retry);
+  buf->good = false;
+  dns_result(handler, e, buf, retry, tcp_retry);
   return server_ok;
 }
 
diff --git a/iocore/dns/DNSConnection.cc b/iocore/dns/DNSConnection.cc
index eddbadd..223aef4 100644
--- a/iocore/dns/DNSConnection.cc
+++ b/iocore/dns/DNSConnection.cc
@@ -33,6 +33,7 @@
 
 #define SET_TCP_NO_DELAY
 #define SET_NO_LINGER
+#define SET_SO_KEEPALIVE
 // set in the OS
 // #define RECV_BUF_SIZE            (1024*64)
 // #define SEND_BUF_SIZE            (1024*64)
@@ -84,6 +85,8 @@ DNSConnection::connect(sockaddr const *addr, Options const &opt)
 {
   ink_assert(fd == NO_FD);
   ink_assert(ats_is_ip(addr));
+  this->opt = opt;
+  this->tcp_data.reset();
 
   int res = 0;
   short Proto;
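
The TCP path added in DNS.cc above relies on standard DNS-over-TCP framing: every message is preceded by a two-byte, network-order length field (hence tcp_data_length_offset = 2). _ink_res_mkquery() writes the prefix with NS_PUT16 when over_tcp is set, and recv_dns() reads the prefix first, converts it with ntohs(), then reads that many bytes into tcp_data. A minimal standalone sketch of the same framing, using hypothetical names (query, out) that are not part of this commit:

    // Sketch only: the two-byte length prefix used for DNS over TCP.
    #include <arpa/inet.h>
    #include <cstdint>
    #include <cstring>

    static int
    frame_tcp_query(const uint8_t *query, uint16_t query_len, uint8_t *out /* >= query_len + 2 */)
    {
      uint16_t prefix = htons(query_len);     // network-order length, as NS_PUT16(r, buffer) does above
      std::memcpy(out, &prefix, 2);           // prefix occupies tcp_data_length_offset bytes
      std::memcpy(out + 2, query, query_len); // the DNS message itself follows
      return query_len + 2;                   // total bytes to send on the TCP fd
    }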
diff --git a/iocore/dns/I_DNSProcessor.h b/iocore/dns/I_DNSProcessor.h
index 0c3d324..034551c 100644
--- a/iocore/dns/I_DNSProcessor.h
+++ b/iocore/dns/I_DNSProcessor.h
@@ -26,12 +26,14 @@
 
 #include "SRV.h"
 
-#define MAX_DNS_PACKET_LEN 8192
-#define DNS_MAX_ALIASES 35
-#define DNS_MAX_ADDRS 35
-#define DNS_HOSTBUF_SIZE 8192
-#define DOMAIN_SERVICE_PORT 53
-#define DEFAULT_DOMAIN_NAME_SERVER 0 // use the default server
+const int DOMAIN_SERVICE_PORT        = NAMESERVER_PORT;
+const int DEFAULT_DOMAIN_NAME_SERVER = 0;
+
+const int MAX_DNS_PACKET_LEN = 8192;
+const int DNS_RR_MAX_COUNT   = (MAX_DNS_PACKET_LEN - HFIXEDSZ + RRFIXEDSZ - 1) / RRFIXEDSZ;
+const int DNS_MAX_ALIASES    = DNS_RR_MAX_COUNT;
+const int DNS_MAX_ADDRS      = DNS_RR_MAX_COUNT;
+const int DNS_HOSTBUF_SIZE   = MAX_DNS_PACKET_LEN;
 
 /**
   All buffering required to handle a DNS receipt. For asynchronous DNS,
@@ -48,7 +50,8 @@ struct HostEnt : RefCountObj {
   u_char *h_addr_ptrs[DNS_MAX_ADDRS + 1] = {nullptr};
   u_char hostbuf[DNS_HOSTBUF_SIZE]       = {0};
   SRVHosts srv_hosts;
-
+  bool good = true;
+  bool isNameError();
   virtual void free();
 };
 
diff --git a/iocore/dns/P_DNSConnection.h b/iocore/dns/P_DNSConnection.h
index 8fac46e..c2aa57b 100644
--- a/iocore/dns/P_DNSConnection.h
+++ b/iocore/dns/P_DNSConnection.h
@@ -32,11 +32,13 @@
 #define __P_DNSCONNECTION_H__
 
 #include "I_EventSystem.h"
+#include "I_DNSProcessor.h"
 
 //
 // Connection
 //
 struct DNSHandler;
+enum class DNS_CONN_MODE { UDP_ONLY, TCP_RETRY, TCP_ONLY };
 
 struct DNSConnection {
   /// Options for connecting.
@@ -75,11 +77,26 @@ struct DNSConnection {
   int fd;
   IpEndpoint ip;
   int num;
+  Options opt;
   LINK(DNSConnection, link);
   EventIO eio;
   InkRand generator;
   DNSHandler *handler;
 
+  /// Tracks the progress of reading a length-prefixed DNS response on a TCP connection.
+  struct TCPData {
+    Ptr<HostEnt> buf_ptr;
+    unsigned short total_length = 0;
+    unsigned short done_reading = 0;
+    void
+    reset()
+    {
+      buf_ptr.clear();
+      total_length = 0;
+      done_reading = 0;
+    }
+  } tcp_data;
+
   int connect(sockaddr const *addr, Options const &opt = DEFAULT_OPTIONS);
   /*
                 bool non_blocking_connect = NON_BLOCKING_CONNECT,
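
Because proxy.config.dns.connection.mode is read as an integer in DNSProcessor::start() and static_cast to DNS_CONN_MODE, the declaration order above fixes the mapping: 0 = UDP_ONLY (previous behavior), 1 = TCP_RETRY (retry over TCP when a UDP answer comes back truncated), 2 = TCP_ONLY. A hedged sketch of that interpretation; the helper name is illustrative, and the real code is a plain static_cast with no range check:

    // Illustrative only: how the configured integer maps onto the enum above.
    static DNS_CONN_MODE
    mode_from_config(int value)
    {
      switch (value) {
      case 1:
        return DNS_CONN_MODE::TCP_RETRY; // UDP first, retry over TCP on truncation
      case 2:
        return DNS_CONN_MODE::TCP_ONLY;  // every query over TCP
      default:
        return DNS_CONN_MODE::UDP_ONLY;  // 0 (and anything unexpected): UDP only
      }
    }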
diff --git a/iocore/dns/P_DNSProcessor.h b/iocore/dns/P_DNSProcessor.h
index f51b743..304322e 100644
--- a/iocore/dns/P_DNSProcessor.h
+++ b/iocore/dns/P_DNSProcessor.h
@@ -188,7 +188,8 @@ struct DNSHandler : public Continuation {
   IpEndpoint local_ipv4; ///< Local V4 address if set.
   int ifd[MAX_NAMED];
   int n_con;
-  DNSConnection con[MAX_NAMED];
+  DNSConnection tcpcon[MAX_NAMED];
+  DNSConnection udpcon[MAX_NAMED];
   Queue<DNSEntry> entries;
   Queue<DNSConnection> triggered;
   int in_flight;
@@ -250,7 +251,8 @@ struct DNSHandler : public Continuation {
   int startEvent_sdns(int event, Event *e);
   int mainEvent(int event, Event *e);
 
-  void open_con(sockaddr const *addr, bool failed = false, int icon = 0);
+  void open_cons(sockaddr const *addr, bool failed = false, int icon = 0);
+  void open_con(sockaddr const *addr, bool failed = false, int icon = 0, bool over_tcp = false);
   void failover();
   void rr_failure(int ndx);
   void recover();
@@ -329,7 +331,8 @@ DNSHandler::DNSHandler()
     failover_soon_number[i]    = 0;
     crossed_failover_number[i] = 0;
     ns_down[i]                 = 1;
-    con[i].handler             = this;
+    tcpcon[i].handler          = this;
+    udpcon[i].handler          = this;
   }
   memset(&qid_in_flight, 0, sizeof(qid_in_flight));
   SET_HANDLER(&DNSHandler::startEvent);
diff --git a/iocore/dns/SRV.h b/iocore/dns/SRV.h
index 9b09c52..79ccbc5 100644
--- a/iocore/dns/SRV.h
+++ b/iocore/dns/SRV.h
@@ -24,12 +24,12 @@
 #ifndef _SRV_h_
 #define _SRV_h_
 
+#include <vector>
 #include "ts/ink_platform.h"
 #include "I_HostDBProcessor.h"
 
 struct HostDBInfo;
 
-#define HOST_DB_MAX_ROUND_ROBIN_INFO 16
 #define RAND_INV_RANGE(r) ((int)((RAND_MAX + 1) / (r)))
 
 struct SRV {
@@ -50,9 +50,8 @@ operator<(const SRV &left, const SRV &right)
 }
 
 struct SRVHosts {
-  unsigned int srv_host_count   = 0;
   unsigned int srv_hosts_length = 0;
-  SRV hosts[HOST_DB_MAX_ROUND_ROBIN_INFO];
+  std::vector<SRV> hosts;
 };
 
 #endif
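
With SRVHosts now holding a std::vector<SRV> instead of a fixed HOST_DB_MAX_ROUND_ROBIN_INFO array, dns_process() builds each SRV record in a local and push_back()s only the ones it keeps, bounded by hostdb_round_robin_max_count. Roughly, assuming SRV::host is a fixed character buffer as it is used above (the field values here are placeholders):

    // Sketch of the new accumulation pattern; values are placeholders, not real records.
    SRVHosts srv_hosts;
    SRV srv;
    ink_strlcpy(srv.host, "backend.example.com", sizeof(srv.host));
    srv.port     = 8080;
    srv.priority = 10;
    srv.weight   = 5;
    srv.host_len = ::strlen(srv.host) + 1;
    srv.key      = makeHostHash(srv.host);
    if (srv_hosts.hosts.size() < hostdb_round_robin_max_count) {
      srv_hosts.srv_hosts_length += srv.host_len;
      srv_hosts.hosts.push_back(srv); // replaces indexing into the old fixed array
    }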
diff --git a/iocore/eventsystem/I_Continuation.h b/iocore/eventsystem/I_Continuation.h
index 9c1521d..1881c3a 100644
--- a/iocore/eventsystem/I_Continuation.h
+++ b/iocore/eventsystem/I_Continuation.h
@@ -148,7 +148,7 @@ public:
 
   */
   int
-  handleEvent(int event = CONTINUATION_EVENT_NONE, void *data = 0)
+  handleEvent(int event = CONTINUATION_EVENT_NONE, void *data = nullptr)
   {
     return (this->*handler)(event, data);
   }
diff --git a/iocore/eventsystem/I_SocketManager.h b/iocore/eventsystem/I_SocketManager.h
index c85aeec..b8ff1d0 100644
--- a/iocore/eventsystem/I_SocketManager.h
+++ b/iocore/eventsystem/I_SocketManager.h
@@ -77,9 +77,9 @@ struct SocketManager {
 
   // result is the number of bytes or -errno
   int64_t read(int fd, void *buf, int len, void *pOLP = nullptr);
-  int64_t vector_io(int fd, struct iovec *vector, size_t count, int read_request, void *pOLP = 0);
+  int64_t vector_io(int fd, struct iovec *vector, size_t count, int read_request, void *pOLP = nullptr);
   int64_t readv(int fd, struct iovec *vector, size_t count);
-  int64_t read_vector(int fd, struct iovec *vector, size_t count, void *pOLP = 0);
+  int64_t read_vector(int fd, struct iovec *vector, size_t count, void *pOLP = nullptr);
   int64_t pread(int fd, void *buf, int len, off_t offset, char *tag = nullptr);
 
   int recv(int s, void *buf, int len, int flags);
@@ -87,12 +87,12 @@ struct SocketManager {
 
   int64_t write(int fd, void *buf, int len, void *pOLP = nullptr);
   int64_t writev(int fd, struct iovec *vector, size_t count);
-  int64_t write_vector(int fd, struct iovec *vector, size_t count, void *pOLP = 0);
+  int64_t write_vector(int fd, struct iovec *vector, size_t count, void *pOLP = nullptr);
   int64_t pwrite(int fd, void *buf, int len, off_t offset, char *tag = nullptr);
 
   int send(int fd, void *buf, int len, int flags);
   int sendto(int fd, void *buf, int len, int flags, struct sockaddr const *to, int tolen);
-  int sendmsg(int fd, struct msghdr *m, int flags, void *pOLP = 0);
+  int sendmsg(int fd, struct msghdr *m, int flags, void *pOLP = nullptr);
   int64_t lseek(int fd, off_t offset, int whence);
   int fstat(int fd, struct stat *);
   int unlink(char *buf);
diff --git a/iocore/eventsystem/P_UnixSocketManager.h b/iocore/eventsystem/P_UnixSocketManager.h
index b023a8d..b45f745 100644
--- a/iocore/eventsystem/P_UnixSocketManager.h
+++ b/iocore/eventsystem/P_UnixSocketManager.h
@@ -118,7 +118,7 @@ SocketManager::vector_io(int fd, struct iovec *vector, size_t count, int read_re
   int64_t current_request_bytes;
 
   for (n_vec = 0; n_vec < (int)count; n_vec += max_iovecs_per_request) {
-    current_count = min(max_iovecs_per_request, ((int)(count - n_vec)));
+    current_count = std::min(max_iovecs_per_request, ((int)(count - n_vec)));
     do {
       // coverity[tainted_data_argument]
       r = read_request ? ::readv(fd, &vector[n_vec], current_count) : ::writev(fd, &vector[n_vec], current_count);
diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index aa3f35d..7fc31e7 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -323,7 +323,6 @@ EventProcessor::spawn_event_threads(EventType ev_type, int n_threads, size_t sta
     t->id                        = i; // unfortunately needed to support affinity and NUMA logic.
     t->set_event_type(ev_type);
     t->schedule_spawn(&thread_initializer);
-    snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", tg->_name.get(), i);
   }
   tg->_count = n_threads;
   n_ethreads += n_threads;
@@ -332,6 +331,7 @@ EventProcessor::spawn_event_threads(EventType ev_type, int n_threads, size_t sta
   // the group. Some thread set up depends on knowing the total number of threads but that can't be
   // safely updated until all the EThread instances are created and stored in the table.
   for (i = 0; i < n_threads; ++i) {
+    snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", tg->_name.get(), i);
     void *stack = Thread_Affinity_Initializer.alloc_stack(tg->_thread[i], stacksize);
     tg->_thread[i]->start(thr_name, stack, stacksize);
   }
@@ -352,7 +352,7 @@ EventProcessor::initThreadState(EThread *t)
       // To avoid race conditions on the event in the spawn queue, create a local one to actually send.
       // Use the spawn queue event as a read only model.
       Event *nev = eventAllocator.alloc();
-      for (Event *ev = thread_group[i]._spawnQueue.head; NULL != ev; ev = ev->link.next) {
+      for (Event *ev = thread_group[i]._spawnQueue.head; nullptr != ev; ev = ev->link.next) {
         nev->init(ev->continuation, 0, 0);
         nev->ethread        = t;
         nev->callback_event = ev->callback_event;
diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc
index 57295bb..af42b1c 100644
--- a/iocore/hostdb/HostDB.cc
+++ b/iocore/hostdb/HostDB.cc
@@ -51,6 +51,7 @@ int hostdb_lookup_timeout                      = 30;
 int hostdb_insert_timeout                      = 160;
 int hostdb_re_dns_on_reload                    = false;
 int hostdb_ttl_mode                            = TTL_OBEY;
+unsigned int hostdb_round_robin_max_count      = 16;
 unsigned int hostdb_ip_stale_interval          = HOST_DB_IP_STALE;
 unsigned int hostdb_ip_timeout_interval        = HOST_DB_IP_TIMEOUT;
 unsigned int hostdb_ip_fail_timeout_interval   = HOST_DB_IP_FAIL_TIMEOUT;
@@ -239,6 +240,18 @@ HostDBCache::HostDBCache() : refcountcache(nullptr), pending_dns(nullptr), remot
   hosts_file_ptr = new RefCountedHostsFileMap();
 }
 
+bool
+HostDBCache::is_pending_dns_for_hash(const INK_MD5 &md5_hash)
+{
+  Queue<HostDBContinuation> &q = pending_dns_for_hash(md5_hash);
+  for (HostDBContinuation *c = q.head; c; c = (HostDBContinuation *)c->link.next) {
+    if (md5_hash == c->md5.hash) {
+      return true;
+    }
+  }
+  return false;
+}
+
 HostDBCache *
 HostDBProcessor::cache()
 {
@@ -397,6 +410,7 @@ HostDBProcessor::start(int, size_t)
   REC_EstablishStaticConfigInt32U(hostdb_ip_fail_timeout_interval, "proxy.config.hostdb.fail.timeout");
   REC_EstablishStaticConfigInt32U(hostdb_serve_stale_but_revalidate, "proxy.config.hostdb.serve_stale_for");
   REC_EstablishStaticConfigInt32U(hostdb_hostfile_check_interval, "proxy.config.hostdb.host_file.interval");
+  REC_EstablishStaticConfigInt32U(hostdb_round_robin_max_count, "proxy.config.hostdb.round_robin_max_count");
 
   //
   // Set up hostdb_current_interval
@@ -554,6 +568,11 @@ probe(ProxyMutex *mutex, HostDBMD5 const &md5, bool ignore_timeout)
 
   // If the record is stale, but we want to revalidate-- lets start that up
   if ((!ignore_timeout && r->is_ip_stale() && !r->reverse_dns) || (r->is_ip_timeout() && r->serve_stale_but_revalidate())) {
+    if (hostDB.is_pending_dns_for_hash(md5.hash)) {
+      Debug("hostdb", "stale %u %u %u, using it and pending to refresh it", r->ip_interval(), r->ip_timestamp,
+            r->ip_timeout_interval);
+      return r;
+    }
     Debug("hostdb", "stale %u %u %u, using it and refreshing it", r->ip_interval(), r->ip_timestamp, r->ip_timeout_interval);
     HostDBContinuation *c = hostDBContAllocator.alloc();
     HostDBContinuation::Options copt;
@@ -1155,9 +1174,9 @@ HostDBContinuation::lookup_done(IpAddr const &ip, const char *aname, bool around
       }
       r->is_srv = false;
     } else if (is_srv()) {
-      ink_assert(srv && srv->srv_host_count > 0 && srv->srv_host_count <= 16 && around_robin);
+      ink_assert(srv && srv->hosts.size() && srv->hosts.size() <= hostdb_round_robin_max_count && around_robin);
 
-      r->data.srv.srv_offset = srv->srv_host_count;
+      r->data.srv.srv_offset = srv->hosts.size();
       r->reverse_dns         = false;
       r->is_srv              = true;
       r->round_robin         = around_robin;
@@ -1268,13 +1287,13 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     timeout = thread->schedule_in(this, HRTIME_SECONDS(hostdb_insert_timeout));
     return EVENT_DONE;
   } else {
-    bool failed = !e;
+    bool failed = !e || !e->good;
 
     bool is_rr     = false;
     pending_action = nullptr;
 
     if (is_srv()) {
-      is_rr = !failed && (e->srv_hosts.srv_host_count > 0);
+      is_rr = !failed && (e->srv_hosts.hosts.size() > 0);
     } else if (!failed) {
       is_rr = nullptr != e->ent.h_addr_list[1];
     } else {
@@ -1284,6 +1303,12 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     int ttl_seconds = failed ? 0 : e->ttl; // ebalsa: moving to second accuracy
 
     Ptr<HostDBInfo> old_r = probe(mutex.get(), md5, false);
+    // If the DNS lookup failed with NXDOMAIN, remove the old record
+    if (e && e->isNameError() && old_r) {
+      hostDB.refcountcache->erase(old_r->key);
+      old_r = nullptr;
+      Debug("hostdb", "Removing the old record when the DNS lookup failed with NXDOMAIN");
+    }
     HostDBInfo old_info;
     if (old_r) {
       old_info = *old_r.get();
@@ -1296,11 +1321,11 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     // total number of records
     if (is_rr) {
       if (is_srv() && !failed) {
-        valid_records = e->srv_hosts.srv_host_count;
+        valid_records = e->srv_hosts.hosts.size();
       } else {
         void *ptr; // tmp for current entry.
         for (int total_records = 0;
-             total_records < HOST_DB_MAX_ROUND_ROBIN_INFO && nullptr != (ptr = e->ent.h_addr_list[total_records]);
+             total_records < (int)hostdb_round_robin_max_count && nullptr != (ptr = e->ent.h_addr_list[total_records]);
              ++total_records) {
           if (is_addr_valid(af, ptr)) {
             if (!first_record) {
@@ -1339,8 +1364,8 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
 
     int allocSize = s_size + rrsize; // The extra space we need for the rest of the things
 
-    Debug("hostdb", "allocating %d bytes for %s with %d RR records", allocSize, aname, valid_records);
     HostDBInfo *r = HostDBInfo::alloc(allocSize);
+    Debug("hostdb", "allocating %d bytes for %s with %d RR records at [%p]", allocSize, aname, valid_records, r);
     // set up the record
     r->key = md5.hash.fold(); // always set the key
 
@@ -1348,10 +1373,11 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
     ink_strlcpy(r->perm_hostname(), aname, s_size);
     offset += s_size;
 
-    // If the DNS lookup failed (errors such as NXDOMAIN, SERVFAIL, etc.) but we have an old record
+    // If the DNS lookup failed (errors such as SERVFAIL, etc.) but we have an old record
     // which is okay with being served stale-- lets continue to serve the stale record as long as
     // the record is willing to be served.
     if (failed && old_r && old_r->serve_stale_but_revalidate()) {
+      r->free();
       r = old_r.get();
     } else if (is_byname()) {
       if (first_record) {
@@ -1382,8 +1408,8 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
       if (is_srv()) {
         int skip  = 0;
         char *pos = (char *)rr_data + sizeof(HostDBRoundRobin) + valid_records * sizeof(HostDBInfo);
-        SRV *q[HOST_DB_MAX_ROUND_ROBIN_INFO];
-        ink_assert(valid_records <= HOST_DB_MAX_ROUND_ROBIN_INFO);
+        SRV *q[valid_records];
+        ink_assert(valid_records <= (int)hostdb_round_robin_max_count);
         // sort
         for (int i = 0; i < valid_records; ++i) {
           q[i] = &e->srv_hosts.hosts[i];
diff --git a/iocore/hostdb/I_HostDBProcessor.h b/iocore/hostdb/I_HostDBProcessor.h
index d4e5e06..565efe1 100644
--- a/iocore/hostdb/I_HostDBProcessor.h
+++ b/iocore/hostdb/I_HostDBProcessor.h
@@ -42,9 +42,6 @@
 #define EVENT_SRV_IP_REMOVED (SRV_EVENT_EVENTS_START + 1)
 #define EVENT_SRV_GET_RESPONSE (SRV_EVENT_EVENTS_START + 2)
 
-// TODO: make configurable
-#define HOST_DB_MAX_ROUND_ROBIN_INFO 16
-
 //
 // Data
 //
@@ -64,6 +61,7 @@ extern unsigned int hostdb_ip_stale_interval;
 extern unsigned int hostdb_ip_timeout_interval;
 extern unsigned int hostdb_ip_fail_timeout_interval;
 extern unsigned int hostdb_serve_stale_but_revalidate;
+extern unsigned int hostdb_round_robin_max_count;
 
 static inline unsigned int
 makeHostHash(const char *string)
@@ -161,6 +159,7 @@ struct HostDBInfo : public RefCountObj {
   void
   free()
   {
+    Debug("hostdb", "freeing %d bytes at [%p]", (1 << (7 + iobuffer_index)), this);
     ioBufAllocator[iobuffer_index].free_void((void *)(this));
   }
 
diff --git a/iocore/hostdb/P_HostDBProcessor.h b/iocore/hostdb/P_HostDBProcessor.h
index 46432be..4709244 100644
--- a/iocore/hostdb/P_HostDBProcessor.h
+++ b/iocore/hostdb/P_HostDBProcessor.h
@@ -198,15 +198,17 @@ struct HostDBCache {
 
   // TODO configurable number of items in the cache
   Queue<HostDBContinuation, Continuation::Link_link> *pending_dns;
-  Queue<HostDBContinuation, Continuation::Link_link> &pending_dns_for_hash(INK_MD5 &md5);
+  Queue<HostDBContinuation, Continuation::Link_link> &pending_dns_for_hash(const INK_MD5 &md5);
   Queue<HostDBContinuation, Continuation::Link_link> *remoteHostDBQueue;
   HostDBCache();
+  bool is_pending_dns_for_hash(const INK_MD5 &md5);
 };
 
 inline int
 HostDBRoundRobin::index_of(sockaddr const *ip)
 {
-  bool bad = (rrcount <= 0 || rrcount > HOST_DB_MAX_ROUND_ROBIN_INFO || good <= 0 || good > HOST_DB_MAX_ROUND_ROBIN_INFO);
+  bool bad = (rrcount <= 0 || (unsigned int)rrcount > hostdb_round_robin_max_count || good <= 0 ||
+              (unsigned int)good > hostdb_round_robin_max_count);
   if (bad) {
     ink_assert(!"bad round robin size");
     return -1;
@@ -245,7 +247,8 @@ HostDBRoundRobin::select_next(sockaddr const *ip)
 inline HostDBInfo *
 HostDBRoundRobin::find_target(const char *target)
 {
-  bool bad = (rrcount <= 0 || rrcount > HOST_DB_MAX_ROUND_ROBIN_INFO || good <= 0 || good > HOST_DB_MAX_ROUND_ROBIN_INFO);
+  bool bad = (rrcount <= 0 || (unsigned int)rrcount > hostdb_round_robin_max_count || good <= 0 ||
+              (unsigned int)good > hostdb_round_robin_max_count);
   if (bad) {
     ink_assert(!"bad round robin size");
     return nullptr;
@@ -262,7 +265,8 @@ HostDBRoundRobin::find_target(const char *target)
 inline HostDBInfo *
 HostDBRoundRobin::select_best_http(sockaddr const *client_ip, ink_time_t now, int32_t fail_window)
 {
-  bool bad = (rrcount <= 0 || rrcount > HOST_DB_MAX_ROUND_ROBIN_INFO || good <= 0 || good > HOST_DB_MAX_ROUND_ROBIN_INFO);
+  bool bad = (rrcount <= 0 || (unsigned int)rrcount > hostdb_round_robin_max_count || good <= 0 ||
+              (unsigned int)good > hostdb_round_robin_max_count);
 
   if (bad) {
     ink_assert(!"bad round robin size");
@@ -331,7 +335,8 @@ HostDBRoundRobin::select_best_http(sockaddr const *client_ip, ink_time_t now, in
 inline HostDBInfo *
 HostDBRoundRobin::select_best_srv(char *target, InkRand *rand, ink_time_t now, int32_t fail_window)
 {
-  bool bad = (rrcount <= 0 || rrcount > HOST_DB_MAX_ROUND_ROBIN_INFO || good <= 0 || good > HOST_DB_MAX_ROUND_ROBIN_INFO);
+  bool bad = (rrcount <= 0 || (unsigned int)rrcount > hostdb_round_robin_max_count || good <= 0 ||
+              (unsigned int)good > hostdb_round_robin_max_count);
 
   if (bad) {
     ink_assert(!"bad round robin size");
@@ -344,11 +349,11 @@ HostDBRoundRobin::select_best_srv(char *target, InkRand *rand, ink_time_t now, i
   }
 #endif
 
-  int i = 0, len = 0;
+  int i           = 0;
+  int len         = 0;
   uint32_t weight = 0, p = INT32_MAX;
   HostDBInfo *result = nullptr;
-  HostDBInfo *infos[HOST_DB_MAX_ROUND_ROBIN_INFO];
-
+  HostDBInfo *infos[good];
   do {
     // if the real isn't alive-- exclude it from selection
     if (!info(i).is_alive(now, fail_window)) {
@@ -519,7 +524,7 @@ is_dotted_form_hostname(const char *c)
 }
 
 inline Queue<HostDBContinuation> &
-HostDBCache::pending_dns_for_hash(INK_MD5 &md5)
+HostDBCache::pending_dns_for_hash(const INK_MD5 &md5)
 {
   return pending_dns[this->refcountcache->partition_for_key(md5.fold())];
 }
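
The compile-time HOST_DB_MAX_ROUND_ROBIN_INFO limit of 16 is replaced by the runtime variable hostdb_round_robin_max_count, read from proxy.config.hostdb.round_robin_max_count in HostDBProcessor::start() and defaulting to 16. The repeated rrcount/good validation above reduces to the following pattern (the helper name is illustrative, not part of the commit):

    // Illustrative helper equivalent to the inverted 'bad' checks above.
    static bool
    rr_counts_in_range(int rrcount, int good)
    {
      return rrcount > 0 && (unsigned int)rrcount <= hostdb_round_robin_max_count && good > 0 &&
             (unsigned int)good <= hostdb_round_robin_max_count;
    }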
diff --git a/iocore/net/Makefile.am b/iocore/net/Makefile.am
index dcebadf..38780fe 100644
--- a/iocore/net/Makefile.am
+++ b/iocore/net/Makefile.am
@@ -72,7 +72,7 @@ test_UDPNet_LDADD = \
   $(top_builddir)/mgmt/libmgmt_p.la \
   $(top_builddir)/lib/records/librecords_p.a \
   $(top_builddir)/lib/ts/libtsutil.la \
-  @LIBTCL@ @HWLOC_LIBS@
+  @LIBTCL@ @HWLOC_LIBS@ @OPENSSL_LIBS@
 
 test_UDPNet_SOURCES = \
   test_I_UDPNet.cc
diff --git a/iocore/net/NetVCTest.cc b/iocore/net/NetVCTest.cc
index 17401d5..07d7c6b 100644
--- a/iocore/net/NetVCTest.cc
+++ b/iocore/net/NetVCTest.cc
@@ -308,7 +308,7 @@ NetVCTest::write_handler(int event)
     if (write_vio->ndone < bytes_to_send) {
       int left_to_send = bytes_to_send - actual_bytes_sent;
       ink_assert(left_to_send >= 0);
-      int to_fill = MIN(left_to_send, write_bytes_to_add_per);
+      int to_fill = std::min(left_to_send, write_bytes_to_add_per);
       actual_bytes_sent += fill_buffer(write_buffer, &write_seed, to_fill);
       write_vio->reenable();
     }
diff --git a/iocore/net/P_InkBulkIO.h b/iocore/net/P_InkBulkIO.h
index 46df82b..f24b95a 100644
--- a/iocore/net/P_InkBulkIO.h
+++ b/iocore/net/P_InkBulkIO.h
@@ -141,7 +141,7 @@ struct InkBulkIORequest {
  */
 #define INKBIO_MAX_PKTS_PER_REQ_BLOCK                                                              \
   ((INKBIO_PKT_SIZE_WO_UDPHDR - (sizeof(struct InkBulkIORequest) + sizeof(struct InkBulkIOPkt))) / \
-   MAX((sizeof(struct InkBulkIORequest)), (sizeof(struct InkBulkIOPkt))))
+   std::max((sizeof(struct InkBulkIORequest)), (sizeof(struct InkBulkIOPkt))))
 
 /*
  * Requests are just block-ids---the block id points to the inkbio-block
diff --git a/iocore/net/P_SSLConfig.h b/iocore/net/P_SSLConfig.h
index 7728a41..f15df36 100644
--- a/iocore/net/P_SSLConfig.h
+++ b/iocore/net/P_SSLConfig.h
@@ -162,7 +162,7 @@ private:
 struct SSLTicketParams : public ConfigInfo {
   ssl_ticket_key_block *default_global_keyblock;
   char *ticket_key_filename;
-  void LoadTicket();
+  bool LoadTicket();
   void cleanup();
 
   ~SSLTicketParams() { cleanup(); }
diff --git a/iocore/net/P_SSLNetVConnection.h b/iocore/net/P_SSLNetVConnection.h
index 59733d3..9fcffb1 100644
--- a/iocore/net/P_SSLNetVConnection.h
+++ b/iocore/net/P_SSLNetVConnection.h
@@ -90,6 +90,7 @@ class SSLNetVConnection : public UnixNetVConnection
 
 public:
   int sslStartHandShake(int event, int &err) override;
+  void clear() override;
   void free(EThread *t) override;
 
   virtual void
@@ -291,6 +292,7 @@ public:
 
   SSL *ssl                         = nullptr;
   ink_hrtime sslHandshakeBeginTime = 0;
+  ink_hrtime sslHandshakeEndTime   = 0;
   ink_hrtime sslLastWriteTime      = 0;
   int64_t sslTotalBytesSent        = 0;
 
diff --git a/iocore/net/P_UnixNet.h b/iocore/net/P_UnixNet.h
index b570b91..ce76c45 100644
--- a/iocore/net/P_UnixNet.h
+++ b/iocore/net/P_UnixNet.h
@@ -196,7 +196,8 @@ public:
   int startNetEvent(int event, Event *data);
   int mainNetEvent(int event, Event *data);
   int mainNetEventExt(int event, Event *data);
-  void process_enabled_list(NetHandler *);
+  void process_enabled_list();
+  void process_ready_list();
   void manage_keep_alive_queue();
   bool manage_active_queue(bool ignore_queue_size);
   void add_to_keep_alive_queue(UnixNetVConnection *vc);
@@ -205,6 +206,44 @@ public:
   void remove_from_active_queue(UnixNetVConnection *vc);
   void configure_per_thread();
 
+  /**
+    Start to handle read & write event on a UnixNetVConnection.
+    Initial the socket fd of netvc for polling system.
+    Only be called when holding the mutex of this NetHandler.
+
+    @param netvc UnixNetVConnection to be managed by this NetHandler.
+    @return 0 on success, netvc->nh set to this NetHandler.
+            -ERRNO on failure.
+   */
+  int startIO(UnixNetVConnection *netvc);
+  /**
+    Stop to handle read & write event on a UnixNetVConnection.
+    Remove the socket fd of netvc from polling system.
+    Only be called when holding the mutex of this NetHandler and must call stopCop(netvc) first.
+
+    @param netvc UnixNetVConnection to be released.
+    @return netvc->nh set to nullptr.
+   */
+  void stopIO(UnixNetVConnection *netvc);
+
+  /**
+    Start handling the active timeout and inactivity timeout on a UnixNetVConnection.
+    Puts the netvc into open_list; all NetVCs in open_list are checked for timeouts by InactivityCop.
+    Must only be called while holding the mutex of this NetHandler, and startIO(netvc) must be called first.
+
+    @param netvc UnixNetVConnection to be managed by InactivityCop
+   */
+  void startCop(UnixNetVConnection *netvc);
+  /**
+    Stop handling the active timeout and inactivity timeout on a UnixNetVConnection.
+    Removes the netvc from open_list and cop_list.
+    Also removes the netvc from keep_alive_queue and active_queue if its context is IN.
+    Must only be called while holding the mutex of this NetHandler.
+
+    @param netvc UnixNetVConnection to be released.
+   */
+  void stopCop(UnixNetVConnection *netvc);
+
   NetHandler();
 
 private:
@@ -638,4 +677,70 @@ EventIO::stop()
   return 0;
 }
 
+TS_INLINE int
+NetHandler::startIO(UnixNetVConnection *netvc)
+{
+  ink_assert(this->mutex->thread_holding == this_ethread());
+  ink_assert(netvc->thread == this_ethread());
+  int res = 0;
+
+  PollDescriptor *pd = get_PollDescriptor(trigger_event->ethread);
+  if (netvc->ep.start(pd, netvc, EVENTIO_READ | EVENTIO_WRITE) < 0) {
+    res = errno;
+    // EEXIST should be ok, though it should have been cleared before we got back here
+    if (errno != EEXIST) {
+      Debug("iocore_net", "NetHandler::startIO : failed on EventIO::start, errno = [%d](%s)", errno, strerror(errno));
+      return -res;
+    }
+  }
+
+  if (netvc->read.triggered == 1) {
+    read_ready_list.enqueue(netvc);
+  }
+  netvc->nh = this;
+  return res;
+}
+
+TS_INLINE void
+NetHandler::stopIO(UnixNetVConnection *netvc)
+{
+  ink_release_assert(netvc->nh == this);
+
+  netvc->ep.stop();
+
+  read_ready_list.remove(netvc);
+  write_ready_list.remove(netvc);
+  if (netvc->read.in_enabled_list) {
+    read_enable_list.remove(netvc);
+    netvc->read.in_enabled_list = 0;
+  }
+  if (netvc->write.in_enabled_list) {
+    write_enable_list.remove(netvc);
+    netvc->write.in_enabled_list = 0;
+  }
+
+  netvc->nh = nullptr;
+}
+
+TS_INLINE void
+NetHandler::startCop(UnixNetVConnection *netvc)
+{
+  ink_assert(this->mutex->thread_holding == this_ethread());
+  ink_release_assert(netvc->nh == this);
+  ink_assert(!open_list.in(netvc));
+
+  open_list.enqueue(netvc);
+}
+
+TS_INLINE void
+NetHandler::stopCop(UnixNetVConnection *netvc)
+{
+  ink_release_assert(netvc->nh == this);
+
+  open_list.remove(netvc);
+  cop_list.remove(netvc);
+  remove_from_keep_alive_queue(netvc);
+  remove_from_active_queue(netvc);
+}
+
 #endif
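
Taken together, the comments above describe a strict pairing when a NetHandler adopts or releases a UnixNetVConnection: startIO() before startCop(), and stopCop() before stopIO(), all while holding the NetHandler's mutex. A sketch of that ordering; adopt_vc and release_vc are illustrative names, not functions introduced by this commit:

    // Caller must hold nh->mutex on the NetHandler's own thread.
    int
    adopt_vc(NetHandler *nh, UnixNetVConnection *netvc)
    {
      int res = nh->startIO(netvc); // register the fd with the polling system; sets netvc->nh
      if (res < 0) {
        return res;                 // startIO failed, the NetHandler did not take ownership
      }
      nh->startCop(netvc);          // track timeouts via open_list / InactivityCop
      return 0;
    }

    void
    release_vc(NetHandler *nh, UnixNetVConnection *netvc)
    {
      nh->stopCop(netvc); // must run before stopIO(), per the comment on stopIO()
      nh->stopIO(netvc);  // unregister the fd; netvc->nh becomes nullptr
    }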
diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h
index be3fbd0..55e3c41 100644
--- a/iocore/net/P_UnixNetVConnection.h
+++ b/iocore/net/P_UnixNetVConnection.h
@@ -276,6 +276,7 @@ public:
    * This is logic is invoked when the NetVC object is created in a new thread context
    */
   virtual int populate(Connection &con, Continuation *c, void *arg);
+  virtual void clear();
   virtual void free(EThread *t);
 
   ink_hrtime get_inactivity_timeout() override;
diff --git a/iocore/net/SSLConfig.cc b/iocore/net/SSLConfig.cc
index 803ca57..3b15a8a 100644
--- a/iocore/net/SSLConfig.cc
+++ b/iocore/net/SSLConfig.cc
@@ -141,16 +141,16 @@ set_paths_helper(const char *path, const char *filename, char **final_path, char
 {
   if (final_path) {
     if (path && path[0] != '/') {
-      *final_path = RecConfigReadPrefixPath(nullptr, path);
+      *final_path = ats_stringdup(RecConfigReadPrefixPath(nullptr, path));
     } else if (!path || path[0] == '\0') {
-      *final_path = RecConfigReadConfigDir();
+      *final_path = ats_stringdup(RecConfigReadConfigDir());
     } else {
       *final_path = ats_strdup(path);
     }
   }
 
-  if (final_filename) {
-    *final_filename = filename ? Layout::get()->relative_to(path, filename) : nullptr;
+  if (final_filename && path) {
+    *final_filename = filename ? ats_stringdup(Layout::get()->relative_to(path, filename)) : nullptr;
   }
 }
 
@@ -176,7 +176,7 @@ SSLConfigParams::initialize()
   REC_ReadConfigInt32(clientCertLevel, "proxy.config.ssl.client.certification_level");
   REC_ReadConfigStringAlloc(cipherSuite, "proxy.config.ssl.server.cipher_suite");
   REC_ReadConfigStringAlloc(client_cipherSuite, "proxy.config.ssl.client.cipher_suite");
-  dhparamsFile = RecConfigReadConfigPath("proxy.config.ssl.server.dhparams_file");
+  dhparamsFile = ats_stringdup(RecConfigReadConfigPath("proxy.config.ssl.server.dhparams_file"));
 
   int options;
   int client_ssl_options = 0;
@@ -256,7 +256,7 @@ SSLConfigParams::initialize()
   set_paths_helper(serverCertRelativePath, nullptr, &serverCertPathOnly, nullptr);
   ats_free(serverCertRelativePath);
 
-  configFilePath = RecConfigReadConfigPath("proxy.config.ssl.server.multicert.filename");
+  configFilePath = ats_stringdup(RecConfigReadConfigPath("proxy.config.ssl.server.multicert.filename"));
   REC_ReadConfigInteger(configExitOnLoadError, "proxy.config.ssl.server.multicert.exit_on_load_fail");
 
   REC_ReadConfigStringAlloc(ssl_server_private_key_path, "proxy.config.ssl.server.private_key.path");
@@ -523,27 +523,31 @@ SSLCertificateConfig::release(SSLCertLookup *lookup)
   configProcessor.release(configid, lookup);
 }
 
-void
+bool
 SSLTicketParams::LoadTicket()
 {
   cleanup();
 
 #if HAVE_OPENSSL_SESSION_TICKETS
+  ssl_ticket_key_block *keyblock = nullptr;
 
   SSLConfig::scoped_config params;
 
   if (REC_ReadConfigStringAlloc(ticket_key_filename, "proxy.config.ssl.server.ticket_key.filename") == REC_ERR_OKAY &&
       ticket_key_filename != nullptr) {
     ats_scoped_str ticket_key_path(Layout::relative_to(params->serverCertPathOnly, ticket_key_filename));
-    default_global_keyblock = ssl_create_ticket_keyblock(ticket_key_path);
+    keyblock = ssl_create_ticket_keyblock(ticket_key_path);
   } else {
-    default_global_keyblock = ssl_create_ticket_keyblock(nullptr);
+    keyblock = ssl_create_ticket_keyblock(nullptr);
   }
-  if (!default_global_keyblock) {
-    Fatal("Could not load Ticket Key from %s", ticket_key_filename);
-    return;
+  if (!keyblock) {
+    Error("ticket key reloaded from %s", ticket_key_filename);
+    return false;
   }
+  default_global_keyblock = keyblock;
+
   Debug("ssl", "ticket key reloaded from %s", ticket_key_filename);
+  return true;
 
 #endif
 }
@@ -554,7 +558,10 @@ SSLTicketKeyConfig::startup()
   auto sslTicketKey = new ConfigUpdateHandler<SSLTicketKeyConfig>();
 
   sslTicketKey->attach("proxy.config.ssl.server.ticket_key.filename");
-  reconfigure();
+  SSLConfig::scoped_config params;
+  if (!reconfigure() && params->configExitOnLoadError) {
+    Fatal("Failed to load SSL ticket key file");
+  }
 }
 
 bool
@@ -562,8 +569,12 @@ SSLTicketKeyConfig::reconfigure()
 {
   SSLTicketParams *ticketKey = new SSLTicketParams();
 
-  if (ticketKey)
-    ticketKey->LoadTicket();
+  if (ticketKey) {
+    if (!ticketKey->LoadTicket()) {
+      delete ticketKey;
+      return false;
+    }
+  }
 
   configid = configProcessor.set(configid, ticketKey);
   return true;
diff --git a/iocore/net/SSLNetVConnection.cc b/iocore/net/SSLNetVConnection.cc
index 9d0ac5e..732914b 100644
--- a/iocore/net/SSLNetVConnection.cc
+++ b/iocore/net/SSLNetVConnection.cc
@@ -336,7 +336,7 @@ SSLNetVConnection::read_raw_data()
   int64_t r          = 0;
   int64_t total_read = 0;
   int64_t rattempted = 0;
-  char *buffer       = 0;
+  char *buffer       = nullptr;
   int buf_len;
   IOBufferBlock *b = this->handShakeBuffer->first_write_block();
 
@@ -847,41 +847,8 @@ SSLNetVConnection::do_io_close(int lerrno)
 }
 
 void
-SSLNetVConnection::free(EThread *t)
+SSLNetVConnection::clear()
 {
-  got_remote_addr = false;
-  got_local_addr  = false;
-  read.vio.mutex.clear();
-  write.vio.mutex.clear();
-  this->mutex.clear();
-  action_.mutex.clear();
-  this->ep.stop();
-  this->con.close();
-  flags = 0;
-
-  SET_CONTINUATION_HANDLER(this, (SSLNetVConnHandler)&SSLNetVConnection::startEvent);
-
-  if (nh) {
-    nh->read_ready_list.remove(this);
-    nh->write_ready_list.remove(this);
-    nh = nullptr;
-  }
-
-  read.triggered      = 0;
-  write.triggered     = 0;
-  read.enabled        = 0;
-  write.enabled       = 0;
-  read.vio._cont      = nullptr;
-  write.vio._cont     = nullptr;
-  read.vio.vc_server  = nullptr;
-  write.vio.vc_server = nullptr;
-
-  closed = 0;
-  options.reset();
-  con.close();
-
-  ink_assert(con.fd == NO_FD);
-
   if (ssl != nullptr) {
     SSL_free(ssl);
     ssl = nullptr;
@@ -902,6 +869,21 @@ SSLNetVConnection::free(EThread *t)
   free_handshake_buffers();
   sslTrace = false;
 
+  super::clear();
+}
+void
+SSLNetVConnection::free(EThread *t)
+{
+  ink_release_assert(t == this_ethread());
+
+  // close socket fd
+  con.close();
+
+  clear();
+  SET_CONTINUATION_HANDLER(this, (SSLNetVConnHandler)&SSLNetVConnection::startEvent);
+  ink_assert(con.fd == NO_FD);
+  ink_assert(t == this_ethread());
+
   if (from_accept_thread) {
     sslNetVCAllocator.free(this);
   } else {
@@ -1111,7 +1093,7 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err)
     SSLDebugVC(this, "SSL handshake error: %s (%d), errno=%d", SSLErrorName(ssl_error), ssl_error, err);
 
     // start a blind tunnel if tr-pass is set and data does not look like ClientHello
-    char *buf = handShakeBuffer ? handShakeBuffer->buf() : NULL;
+    char *buf = handShakeBuffer ? handShakeBuffer->buf() : nullptr;
     if (getTransparentPassThrough() && buf && *buf != SSL_OP_HANDSHAKE) {
       SSLDebugVC(this, "Data does not look like SSL handshake, starting blind tunnel");
       this->attributes     = HttpProxyPort::TRANSPORT_BLIND_TUNNEL;
@@ -1139,9 +1121,10 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err)
     // do we want to include cert info in trace?
 
     if (sslHandshakeBeginTime) {
-      const ink_hrtime ssl_handshake_time = Thread::get_hrtime() - sslHandshakeBeginTime;
+      sslHandshakeEndTime                 = Thread::get_hrtime();
+      const ink_hrtime ssl_handshake_time = sslHandshakeEndTime - sslHandshakeBeginTime;
+
       Debug("ssl", "ssl handshake time:%" PRId64, ssl_handshake_time);
-      sslHandshakeBeginTime = 0;
       SSL_INCREMENT_DYN_STAT_EX(ssl_total_handshake_time_stat, ssl_handshake_time);
       SSL_INCREMENT_DYN_STAT(ssl_total_success_handshake_count_in_stat);
     }
diff --git a/iocore/net/SSLUtils.cc b/iocore/net/SSLUtils.cc
index a01510e..2befab2 100644
--- a/iocore/net/SSLUtils.cc
+++ b/iocore/net/SSLUtils.cc
@@ -1421,7 +1421,7 @@ ssl_index_certificate(SSLCertLookup *lookup, SSLCertContext const &cc, X509 *cer
 static void
 ssl_callback_info(const SSL *ssl, int where, int ret)
 {
-  Debug("ssl", "ssl_callback_info ssl: %p where: %d ret: %d", ssl, where, ret);
+  Debug("ssl", "ssl_callback_info ssl: %p where: %d ret: %d State: %s", ssl, where, ret, SSL_state_string_long(ssl));
   SSLNetVConnection *netvc = SSLNetVCAccess(ssl);
 
   if ((where & SSL_CB_ACCEPT_LOOP) && netvc->getSSLHandShakeComplete() == true &&
@@ -1626,13 +1626,15 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu
         // Now, load any additional certificate chains specified in this entry.
         if (sslMultCertSettings->ca) {
           const char *ca_name = ca_tok.getNext();
-          ats_scoped_str completeServerCertChainPath(Layout::relative_to(params->serverCertPathOnly, ca_name));
-          if (!SSL_CTX_add_extra_chain_cert_file(ctx, completeServerCertChainPath)) {
-            SSLError("failed to load certificate chain from %s", (const char *)completeServerCertChainPath);
-            goto fail;
-          }
-          if (SSLConfigParams::load_ssl_file_cb) {
-            SSLConfigParams::load_ssl_file_cb(completeServerCertChainPath, CONFIG_FLAG_UNVERSIONED);
+          if (ca_name != nullptr) {
+            ats_scoped_str completeServerCertChainPath(Layout::relative_to(params->serverCertPathOnly, ca_name));
+            if (!SSL_CTX_add_extra_chain_cert_file(ctx, completeServerCertChainPath)) {
+              SSLError("failed to load certificate chain from %s", (const char *)completeServerCertChainPath);
+              goto fail;
+            }
+            if (SSLConfigParams::load_ssl_file_cb) {
+              SSLConfigParams::load_ssl_file_cb(completeServerCertChainPath, CONFIG_FLAG_UNVERSIONED);
+            }
           }
         }
       }
@@ -1720,7 +1722,7 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu
     goto fail;
   }
   EVP_MD_CTX_free(digest);
-  digest = NULL;
+  digest = nullptr;
 
   if (SSL_CTX_set_session_id_context(ctx, hash_buf, hash_len) == 0) {
     SSLError("SSL_CTX_set_session_id_context failed");
diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc
index 435d477..4c46342 100644
--- a/iocore/net/UnixNet.cc
+++ b/iocore/net/UnixNet.cc
@@ -348,32 +348,98 @@ NetHandler::startNetEvent(int event, Event *e)
 // Move VC's enabled on a different thread to the ready list
 //
 void
-NetHandler::process_enabled_list(NetHandler *nh)
+NetHandler::process_enabled_list()
 {
   UnixNetVConnection *vc = nullptr;
 
-  SListM(UnixNetVConnection, NetState, read, enable_link) rq(nh->read_enable_list.popall());
+  SListM(UnixNetVConnection, NetState, read, enable_link) rq(read_enable_list.popall());
   while ((vc = rq.pop())) {
     vc->ep.modify(EVENTIO_READ);
     vc->ep.refresh(EVENTIO_READ);
     vc->read.in_enabled_list = 0;
     if ((vc->read.enabled && vc->read.triggered) || vc->closed) {
-      nh->read_ready_list.in_or_enqueue(vc);
+      read_ready_list.in_or_enqueue(vc);
     }
   }
 
-  SListM(UnixNetVConnection, NetState, write, enable_link) wq(nh->write_enable_list.popall());
+  SListM(UnixNetVConnection, NetState, write, enable_link) wq(write_enable_list.popall());
   while ((vc = wq.pop())) {
     vc->ep.modify(EVENTIO_WRITE);
     vc->ep.refresh(EVENTIO_WRITE);
     vc->write.in_enabled_list = 0;
     if ((vc->write.enabled && vc->write.triggered) || vc->closed) {
-      nh->write_ready_list.in_or_enqueue(vc);
+      write_ready_list.in_or_enqueue(vc);
     }
   }
 }
 
 //
+// Walk through the ready list
+//
+void
+NetHandler::process_ready_list()
+{
+  UnixNetVConnection *vc = nullptr;
+
+#if defined(USE_EDGE_TRIGGER)
+  // UnixNetVConnection *
+  while ((vc = read_ready_list.dequeue())) {
+    // Initialize the thread-local continuation flags
+    set_cont_flags(vc->control_flags);
+    if (vc->closed)
+      close_UnixNetVConnection(vc, trigger_event->ethread);
+    else if (vc->read.enabled && vc->read.triggered)
+      vc->net_read_io(this, trigger_event->ethread);
+    else if (!vc->read.enabled) {
+      read_ready_list.remove(vc);
+#if defined(solaris)
+      if (vc->read.triggered && vc->write.enabled) {
+        vc->ep.modify(-EVENTIO_READ);
+        vc->ep.refresh(EVENTIO_WRITE);
+        vc->writeReschedule(this);
+      }
+#endif
+    }
+  }
+  while ((vc = write_ready_list.dequeue())) {
+    set_cont_flags(vc->control_flags);
+    if (vc->closed)
+      close_UnixNetVConnection(vc, trigger_event->ethread);
+    else if (vc->write.enabled && vc->write.triggered)
+      write_to_net(this, vc, trigger_event->ethread);
+    else if (!vc->write.enabled) {
+      write_ready_list.remove(vc);
+#if defined(solaris)
+      if (vc->write.triggered && vc->read.enabled) {
+        vc->ep.modify(-EVENTIO_WRITE);
+        vc->ep.refresh(EVENTIO_READ);
+        vc->readReschedule(this);
+      }
+#endif
+    }
+  }
+#else  /* !USE_EDGE_TRIGGER */
+  while ((vc = read_ready_list.dequeue())) {
+    diags->set_override(vc->control.debug_override);
+    if (vc->closed)
+      close_UnixNetVConnection(vc, trigger_event->ethread);
+    else if (vc->read.enabled && vc->read.triggered)
+      vc->net_read_io(this, trigger_event->ethread);
+    else if (!vc->read.enabled)
+      vc->ep.modify(-EVENTIO_READ);
+  }
+  while ((vc = write_ready_list.dequeue())) {
+    diags->set_override(vc->control.debug_override);
+    if (vc->closed)
+      close_UnixNetVConnection(vc, trigger_event->ethread);
+    else if (vc->write.enabled && vc->write.triggered)
+      write_to_net(this, vc, trigger_event->ethread);
+    else if (!vc->write.enabled)
+      vc->ep.modify(-EVENTIO_WRITE);
+  }
+#endif /* !USE_EDGE_TRIGGER */
+}
+//
 // The main event for NetHandler
 // This is called every proxy.config.net.event_period, and handles all IO operations scheduled
 // for this period.
@@ -385,61 +451,18 @@ NetHandler::mainNetEvent(int event, Event *e)
   (void)event;
   (void)e;
   EventIO *epd = nullptr;
-  int poll_timeout;
 
   NET_INCREMENT_DYN_STAT(net_handler_run_stat);
 
-  process_enabled_list(this);
-  if (likely(!read_ready_list.empty() || !write_ready_list.empty() || !read_enable_list.empty() || !write_enable_list.empty())) {
-    poll_timeout = 0; // poll immediately returns -- we have triggered stuff to process right now
-  } else {
-    poll_timeout = net_config_poll_timeout;
-  }
+  process_enabled_list();
 
+  // Poll for I/O events via PollCont
+  PollCont *p = get_PollCont(trigger_event->ethread);
+  p->handleEvent(EVENT_NONE, nullptr);
+
+  // Get & Process polling result
   PollDescriptor *pd     = get_PollDescriptor(trigger_event->ethread);
   UnixNetVConnection *vc = nullptr;
-#if TS_USE_EPOLL
-  pd->result = epoll_wait(pd->epoll_fd, pd->ePoll_Triggered_Events, POLL_DESCRIPTOR_SIZE, poll_timeout);
-  NetDebug("iocore_net_main_poll", "[NetHandler::mainNetEvent] epoll_wait(%d,%d), result=%d", pd->epoll_fd, poll_timeout,
-           pd->result);
-#elif TS_USE_KQUEUE
-  struct timespec tv;
-  tv.tv_sec  = poll_timeout / 1000;
-  tv.tv_nsec = 1000000 * (poll_timeout % 1000);
-  pd->result = kevent(pd->kqueue_fd, nullptr, 0, pd->kq_Triggered_Events, POLL_DESCRIPTOR_SIZE, &tv);
-  NetDebug("iocore_net_main_poll", "[NetHandler::mainNetEvent] kevent(%d,%d), result=%d", pd->kqueue_fd, poll_timeout, pd->result);
-#elif TS_USE_PORT
-  int retval;
-  timespec_t ptimeout;
-  ptimeout.tv_sec  = poll_timeout / 1000;
-  ptimeout.tv_nsec = 1000000 * (poll_timeout % 1000);
-  unsigned nget    = 1;
-  if ((retval = port_getn(pd->port_fd, pd->Port_Triggered_Events, POLL_DESCRIPTOR_SIZE, &nget, &ptimeout)) < 0) {
-    pd->result = 0;
-    switch (errno) {
-    case EINTR:
-    case EAGAIN:
-    case ETIME:
-      if (nget > 0) {
-        pd->result = (int)nget;
-      }
-      break;
-    default:
-      ink_assert(!"unhandled port_getn() case:");
-      break;
-    }
-  } else {
-    pd->result = (int)nget;
-  }
-  NetDebug("iocore_net_main_poll", "[NetHandler::mainNetEvent] %d[%s]=port_getn(%d,%p,%d,%d,%d),results(%d)", retval,
-           retval < 0 ? strerror(errno) : "ok", pd->port_fd, pd->Port_Triggered_Events, POLL_DESCRIPTOR_SIZE, nget, poll_timeout,
-           pd->result);
-
-#else
-#error port me
-#endif
-
-  vc = nullptr;
   for (int x = 0; x < pd->result; x++) {
     epd = (EventIO *)get_ev_data(pd, x);
     if (epd->type == EVENTIO_READWRITE_VC) {
@@ -486,63 +509,7 @@ NetHandler::mainNetEvent(int event, Event *e)
 
   pd->result = 0;
 
-#if defined(USE_EDGE_TRIGGER)
-  // UnixNetVConnection *
-  while ((vc = read_ready_list.dequeue())) {
-    // Initialize the thread-local continuation flags
-    set_cont_flags(vc->control_flags);
-    if (vc->closed)
-      close_UnixNetVConnection(vc, trigger_event->ethread);
-    else if (vc->read.enabled && vc->read.triggered)
-      vc->net_read_io(this, trigger_event->ethread);
-    else if (!vc->read.enabled) {
-      read_ready_list.remove(vc);
-#if defined(solaris)
-      if (vc->read.triggered && vc->write.enabled) {
-        vc->ep.modify(-EVENTIO_READ);
-        vc->ep.refresh(EVENTIO_WRITE);
-        vc->writeReschedule(this);
-      }
-#endif
-    }
-  }
-  while ((vc = write_ready_list.dequeue())) {
-    set_cont_flags(vc->control_flags);
-    if (vc->closed)
-      close_UnixNetVConnection(vc, trigger_event->ethread);
-    else if (vc->write.enabled && vc->write.triggered)
-      write_to_net(this, vc, trigger_event->ethread);
-    else if (!vc->write.enabled) {
-      write_ready_list.remove(vc);
-#if defined(solaris)
-      if (vc->write.triggered && vc->read.enabled) {
-        vc->ep.modify(-EVENTIO_WRITE);
-        vc->ep.refresh(EVENTIO_READ);
-        vc->readReschedule(this);
-      }
-#endif
-    }
-  }
-#else  /* !USE_EDGE_TRIGGER */
-  while ((vc = read_ready_list.dequeue())) {
-    diags->set_override(vc->control.debug_override);
-    if (vc->closed)
-      close_UnixNetVConnection(vc, trigger_event->ethread);
-    else if (vc->read.enabled && vc->read.triggered)
-      vc->net_read_io(this, trigger_event->ethread);
-    else if (!vc->read.enabled)
-      vc->ep.modify(-EVENTIO_READ);
-  }
-  while ((vc = write_ready_list.dequeue())) {
-    diags->set_override(vc->control.debug_override);
-    if (vc->closed)
-      close_UnixNetVConnection(vc, trigger_event->ethread);
-    else if (vc->write.enabled && vc->write.triggered)
-      write_to_net(this, vc, trigger_event->ethread);
-    else if (!vc->write.enabled)
-      vc->ep.modify(-EVENTIO_WRITE);
-  }
-#endif /* !USE_EDGE_TRIGGER */
+  process_ready_list();
 
   return EVENT_CONT;
 }
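
The hunk above splits mainNetEvent() into three phases: drain the cross-thread enable lists, let PollCont run the platform poll call, then walk the ready lists. A minimal, self-contained sketch of that three-phase shape, using std::deque and std::function as stand-ins for the real atomic lists and EventIO plumbing (ModelHandler and everything in it is illustrative only, not ATS code):

    #include <cstdio>
    #include <deque>
    #include <functional>

    struct ModelHandler {
      std::deque<std::function<void()>> enable_list; // enables queued from other threads
      std::deque<std::function<void()>> ready_list;  // connections with pending I/O

      void process_enabled_list()
      {
        while (!enable_list.empty()) {
          enable_list.front()();
          enable_list.pop_front();
        }
      }
      void poll() { /* the real loop delegates to PollCont::handleEvent() for epoll/kqueue */ }
      void process_ready_list()
      {
        while (!ready_list.empty()) {
          ready_list.front()();
          ready_list.pop_front();
        }
      }

      void mainNetEvent()
      {
        process_enabled_list(); // 1. move cross-thread enables onto the ready lists
        poll();                 // 2. ask the poll descriptor for new I/O events
        process_ready_list();   // 3. service read/write-ready connections
      }
    };

    int main()
    {
      ModelHandler h;
      h.ready_list.push_back([] { std::puts("read-ready vc serviced"); });
      h.mainNetEvent();
      return 0;
    }
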
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index 1ce7fce..9c7da81 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -92,6 +92,12 @@ net_accept(NetAccept *na, void *ep, bool blockable)
     vc->action_     = *na->action_;
     vc->set_is_transparent(na->opt.f_inbound_transparent);
     vc->set_context(NET_VCONNECTION_IN);
+#ifdef USE_EDGE_TRIGGER
+    // Set the vc as triggered and place it in the read ready queue later in case there is already data on the socket.
+    if (na->server.http_accept_filter) {
+      vc->read.triggered = 1;
+    }
+#endif
     SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::acceptEvent);
 
     if (e->ethread->is_event_type(na->opt.etype)) {
@@ -292,6 +298,12 @@ NetAccept::do_blocking_accept(EThread *t)
     vc->apply_options();
     vc->set_context(NET_VCONNECTION_IN);
     vc->accept_object = this;
+#ifdef USE_EDGE_TRIGGER
+    // Set the vc as triggered and place it in the read ready queue later in case there is already data on the socket.
+    if (server.http_accept_filter) {
+      vc->read.triggered = 1;
+    }
+#endif
     SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::acceptEvent);
     // eventProcessor.schedule_imm(vc, getEtype());
     eventProcessor.schedule_imm_signal(vc, opt.etype);
@@ -352,7 +364,6 @@ NetAccept::acceptFastEvent(int event, void *ep)
   int bufsz, res = 0;
   Connection con;
 
-  PollDescriptor *pd     = get_PollDescriptor(e->ethread);
   UnixNetVConnection *vc = nullptr;
   int loop               = accept_till_done;
 
@@ -440,36 +451,18 @@ NetAccept::acceptFastEvent(int event, void *ep)
     vc->options.ip_family   = opt.ip_family;
     vc->apply_options();
     vc->set_context(NET_VCONNECTION_IN);
-    SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::mainEvent);
-
-    // set thread and nh as acceptEvent does
-    vc->thread = e->ethread;
-    vc->nh     = get_NetHandler(e->ethread);
-
-    if (vc->ep.start(pd, vc, EVENTIO_READ | EVENTIO_WRITE) < 0) {
-      Warning("[NetAccept::acceptFastEvent]: Error in inserting fd[%d] in kevent\n", vc->con.fd);
-      close_UnixNetVConnection(vc, e->ethread);
-      return EVENT_DONE;
-    }
-
-    ink_assert(vc->nh->mutex->thread_holding == this_ethread());
-    vc->set_inactivity_timeout(0);
-    vc->nh->open_list.enqueue(vc);
-
+    vc->action_ = *action_;
 #ifdef USE_EDGE_TRIGGER
-    // Set the vc as triggered and place it in the read ready queue in case there is already data on the socket.
-    Debug("iocore_net", "acceptEvent : Setting triggered and adding to the read ready queue");
-    vc->read.triggered = 1;
-    vc->nh->read_ready_list.enqueue(vc);
-#endif
-
-    if (!action_->cancelled) {
-      // We must be holding the lock already to do later do_io_read's
-      SCOPED_MUTEX_LOCK(lock, vc->mutex, e->ethread);
-      action_->continuation->handleEvent(NET_EVENT_ACCEPT, vc);
-    } else {
-      close_UnixNetVConnection(vc, e->ethread);
+    // Set the vc as triggered and place it in the read ready queue later in case there is already data on the socket.
+    if (server.http_accept_filter) {
+      vc->read.triggered = 1;
     }
+#endif
+    SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::acceptEvent);
+    // We must be holding the lock already to do later do_io_read's
+    SCOPED_MUTEX_LOCK(lock, vc->mutex, e->ethread);
+    vc->handleEvent(EVENT_NONE, nullptr);
+    vc = nullptr;
   } while (loop);
 
 Ldone:
@@ -478,9 +471,6 @@ Ldone:
 Lerror:
   server.close();
   e->cancel();
-  if (vc) {
-    vc->free(e->ethread);
-  }
   NET_DECREMENT_DYN_STAT(net_accepts_currently_open_stat);
   delete this;
   return EVENT_DONE;
diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc
index 8b0f740..bcdeaee 100644
--- a/iocore/net/UnixNetVConnection.cc
+++ b/iocore/net/UnixNetVConnection.cc
@@ -82,11 +82,10 @@ close_UnixNetVConnection(UnixNetVConnection *vc, EThread *t)
   }
   NetHandler *nh = vc->nh;
   vc->cancel_OOB();
-  vc->ep.stop();
-  vc->con.close();
 
   ink_release_assert(vc->thread == t);
 
+  // 1. Cancel timeout
   vc->next_inactivity_timeout_at = 0;
   vc->next_activity_timeout_at   = 0;
 
@@ -94,21 +93,12 @@ close_UnixNetVConnection(UnixNetVConnection *vc, EThread *t)
   vc->active_timeout_in     = 0;
 
   if (nh) {
-    nh->open_list.remove(vc);
-    nh->cop_list.remove(vc);
-    nh->read_ready_list.remove(vc);
-    nh->write_ready_list.remove(vc);
-    if (vc->read.in_enabled_list) {
-      nh->read_enable_list.remove(vc);
-      vc->read.in_enabled_list = 0;
-    }
-    if (vc->write.in_enabled_list) {
-      nh->write_enable_list.remove(vc);
-      vc->write.in_enabled_list = 0;
-    }
-    vc->remove_from_keep_alive_queue();
-    vc->remove_from_active_queue();
+    // 2. Release vc from InactivityCop.
+    nh->stopCop(vc);
+    // 3. Release vc from NetHandler.
+    nh->stopIO(vc);
   }
+  // 4. Clear then deallocate vc.
   vc->free(t);
 }
 
@@ -1118,12 +1108,13 @@ UnixNetVConnection::startEvent(int /* event ATS_UNUSED */, Event *e)
 int
 UnixNetVConnection::acceptEvent(int event, Event *e)
 {
-  thread = e->ethread;
+  EThread *t    = (e == nullptr) ? this_ethread() : e->ethread;
+  NetHandler *h = get_NetHandler(t);
 
-  MUTEX_TRY_LOCK(lock, get_NetHandler(thread)->mutex, e->ethread);
+  MUTEX_TRY_LOCK(lock, h->mutex, t);
   if (!lock.is_locked()) {
     if (event == EVENT_NONE) {
-      thread->schedule_in(this, HRTIME_MSECONDS(net_retry_delay));
+      t->schedule_in(this, HRTIME_MSECONDS(net_retry_delay));
       return EVENT_DONE;
     } else {
       e->schedule_in(HRTIME_MSECONDS(net_retry_delay));
@@ -1131,33 +1122,29 @@ UnixNetVConnection::acceptEvent(int event, Event *e)
     }
   }
 
+  thread = t;
+
   if (action_.cancelled) {
     free(thread);
     return EVENT_DONE;
   }
 
-  SET_HANDLER((NetVConnHandler)&UnixNetVConnection::mainEvent);
-
-  nh                 = get_NetHandler(thread);
-  PollDescriptor *pd = get_PollDescriptor(thread);
-  if (ep.start(pd, this, EVENTIO_READ | EVENTIO_WRITE) < 0) {
-    Debug("iocore_net", "acceptEvent : failed EventIO::start");
-    close_UnixNetVConnection(this, e->ethread);
+  // Send this NetVC to NetHandler and start polling for read & write events.
+  if (h->startIO(this) < 0) {
+    free(t);
     return EVENT_DONE;
   }
 
-  set_inactivity_timeout(0);
-  nh->open_list.enqueue(this);
+  // Setup a timeout callback handler.
+  SET_HANDLER((NetVConnHandler)&UnixNetVConnection::mainEvent);
 
-#ifdef USE_EDGE_TRIGGER
-  // Set the vc as triggered and place it in the read ready queue in case there is already data on the socket.
-  Debug("iocore_net", "acceptEvent : Setting triggered and adding to the read ready queue");
-  read.triggered = 1;
-  nh->read_ready_list.enqueue(this);
-#endif
+  // Send this netvc to InactivityCop.
+  nh->startCop(this);
 
   if (inactivity_timeout_in) {
     UnixNetVConnection::set_inactivity_timeout(inactivity_timeout_in);
+  } else {
+    set_inactivity_timeout(0);
   }
 
   if (active_timeout_in) {
@@ -1246,27 +1233,23 @@ UnixNetVConnection::populate(Connection &con_in, Continuation *c, void *arg)
   this->mutex  = c->mutex;
   this->thread = this_ethread();
 
-  EThread *t = this_ethread();
-  if (ep.start(get_PollDescriptor(t), this, EVENTIO_READ | EVENTIO_WRITE) < 0) {
-    // EEXIST should be ok, though it should have been cleared before we got back here
-    if (errno != EEXIST) {
-      Debug("iocore_net", "populate : Failed to add to epoll list");
-      return EVENT_ERROR;
-    }
-  }
-
-  SET_HANDLER(&UnixNetVConnection::mainEvent);
+  EThread *t    = this_ethread();
+  NetHandler *h = get_NetHandler(t);
 
-  this->nh = get_NetHandler(t);
-  ink_assert(this->nh != nullptr);
-  MUTEX_TRY_LOCK(lock, this->nh->mutex, t);
+  MUTEX_TRY_LOCK(lock, h->mutex, t);
   if (!lock.is_locked()) {
     // Clean up and go home
     return EVENT_ERROR;
   }
-  ink_assert(nh->mutex->thread_holding == this_ethread());
-  ink_assert(!nh->open_list.in(this));
-  this->nh->open_list.enqueue(this);
+
+  if (h->startIO(this) < 0) {
+    Debug("iocore_net", "populate : Failed to add to epoll list");
+    return EVENT_ERROR;
+  }
+
+  ink_assert(this->nh != nullptr);
+  SET_HANDLER(&UnixNetVConnection::mainEvent);
+  this->nh->startCop(this);
   ink_assert(this->con.fd != NO_FD);
   return EVENT_DONE;
 }
@@ -1274,14 +1257,14 @@ UnixNetVConnection::populate(Connection &con_in, Continuation *c, void *arg)
 int
 UnixNetVConnection::connectUp(EThread *t, int fd)
 {
+  ink_assert(get_NetHandler(t)->mutex->thread_holding == this_ethread());
   int res;
 
   thread = t;
   if (check_net_throttle(CONNECT, submit_time)) {
     check_throttle_warning();
-    action_.continuation->handleEvent(NET_EVENT_OPEN_FAILED, (void *)-ENET_THROTTLING);
-    free(t);
-    return CONNECT_FAILURE;
+    res = -ENET_THROTTLING;
+    goto fail;
   }
 
   // Force family to agree with remote (server) address.
@@ -1332,43 +1315,40 @@ UnixNetVConnection::connectUp(EThread *t, int fd)
 
   // Must connect after EventIO::Start() to avoid a race condition
   // when edge triggering is used.
-  if (ep.start(get_PollDescriptor(t), this, EVENTIO_READ | EVENTIO_WRITE) < 0) {
-    res = -errno;
-    Debug("iocore_net", "connectUp : Failed to add to epoll list : %s", strerror(errno));
+  if ((res = get_NetHandler(t)->startIO(this)) < 0) {
     goto fail;
   }
 
   if (fd == NO_FD) {
     res = con.connect(nullptr, options);
     if (res != 0) {
+      // fast stopIO
+      nh = nullptr;
       goto fail;
     }
   }
 
-  // start up next round immediately
-
+  // Setup a timeout callback handler.
   SET_HANDLER(&UnixNetVConnection::mainEvent);
+  // Send this netvc to InactivityCop.
+  nh->startCop(this);
 
-  nh = get_NetHandler(t);
   set_inactivity_timeout(0);
-  nh->open_list.enqueue(this);
-
   ink_assert(!active_timeout_in);
   this->set_local_addr();
   action_.continuation->handleEvent(NET_EVENT_OPEN, this);
   return CONNECT_SUCCESS;
 
 fail:
-  lerrno = errno;
+  lerrno = -res;
   action_.continuation->handleEvent(NET_EVENT_OPEN_FAILED, (void *)(intptr_t)res);
   free(t);
   return CONNECT_FAILURE;
 }
 
 void
-UnixNetVConnection::free(EThread *t)
+UnixNetVConnection::clear()
 {
-  ink_release_assert(t == this_ethread());
   // clear variables for reuse
   this->mutex.clear();
   action_.mutex.clear();
@@ -1377,8 +1357,7 @@ UnixNetVConnection::free(EThread *t)
   attributes      = 0;
   read.vio.mutex.clear();
   write.vio.mutex.clear();
-  flags = 0;
-  SET_CONTINUATION_HANDLER(this, (NetVConnHandler)&UnixNetVConnection::startEvent);
+  flags               = 0;
   nh                  = nullptr;
   read.triggered      = 0;
   write.triggered     = 0;
@@ -1396,6 +1375,18 @@ UnixNetVConnection::free(EThread *t)
   ink_assert(!write.ready_link.prev && !write.ready_link.next);
   ink_assert(!write.enable_link.next);
   ink_assert(!link.next && !link.prev);
+}
+
+void
+UnixNetVConnection::free(EThread *t)
+{
+  ink_release_assert(t == this_ethread());
+
+  // close socket fd
+  con.close();
+
+  clear();
+  SET_CONTINUATION_HANDLER(this, (NetVConnHandler)&UnixNetVConnection::startEvent);
   ink_assert(con.fd == NO_FD);
   ink_assert(t == this_ethread());
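
close_UnixNetVConnection() above now spells out the teardown order (stop the InactivityCop watch, stop polling, then free), mirroring the startIO()/startCop() calls on the accept and connect paths. A toy model of that pairing, with free functions standing in for the real NetHandler members (nothing below is ATS code; it only illustrates the ordering):

    #include <iostream>

    struct NetVC {
      bool polled = false; // registered with the poll descriptor
      bool copped = false; // watched by InactivityCop for timeouts
    };

    void startIO(NetVC &vc)  { vc.polled = true;  } // accept/connect: begin polling read & write
    void startCop(NetVC &vc) { vc.copped = true;  } // accept/connect: begin timeout tracking
    void stopCop(NetVC &vc)  { vc.copped = false; } // close step 2: release from InactivityCop
    void stopIO(NetVC &vc)   { vc.polled = false; } // close step 3: release from the handler
    void free_vc(NetVC &vc)  { std::cout << "freed (polled=" << vc.polled << ", copped=" << vc.copped << ")\n"; } // step 4

    int main()
    {
      NetVC vc;
      startIO(vc);
      startCop(vc);
      // ... I/O happens ...
      stopCop(vc); // teardown mirrors setup, in reverse
      stopIO(vc);
      free_vc(vc);
      return 0;
    }
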
 
diff --git a/iocore/net/UnixUDPNet.cc b/iocore/net/UnixUDPNet.cc
index ee2c591..faa75af 100644
--- a/iocore/net/UnixUDPNet.cc
+++ b/iocore/net/UnixUDPNet.cc
@@ -682,10 +682,10 @@ UDPQueue::service(UDPNetHandler *nh)
       // insert into our queue.
       Debug("udp-send", "Adding %p", p);
       if (p->conn->lastPktStartTime == 0) {
-        pktSendStartTime = MAX(now, p->delivery_time);
+        pktSendStartTime = std::max(now, p->delivery_time);
       } else {
         pktSendTime      = p->delivery_time;
-        pktSendStartTime = MAX(MAX(now, pktSendTime), p->delivery_time);
+        pktSendStartTime = std::max(std::max(now, pktSendTime), p->delivery_time);
       }
       p->conn->lastPktStartTime = pktSendStartTime;
       p->delivery_time          = pktSendStartTime;
diff --git a/iocore/net/test_I_UDPNet.cc b/iocore/net/test_I_UDPNet.cc
index 68d708c..24b6e95 100644
--- a/iocore/net/test_I_UDPNet.cc
+++ b/iocore/net/test_I_UDPNet.cc
@@ -36,8 +36,9 @@
 
 #include "diags.i"
 
-static const int port       = 4443;
 static const char payload[] = "hello";
+in_port_t port              = 0;
+int pfd[2]; // Pipe used to signal client with transient port.
 
 /*This implements a standard Unix echo server: just send every udp packet you
   get back to where it came from*/
@@ -58,7 +59,7 @@ EchoServer::start()
   sockaddr_in addr;
   addr.sin_family      = AF_INET;
   addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-  addr.sin_port        = htons(port);
+  addr.sin_port        = 0;
 
   udpNet.UDPBind(static_cast<Continuation *>(this), reinterpret_cast<sockaddr const *>(&addr), 1048576, 1048576);
 
@@ -71,7 +72,12 @@ EchoServer::handle_packet(int event, void *data)
   switch (event) {
   case NET_EVENT_DATAGRAM_OPEN: {
     UDPConnection *con = reinterpret_cast<UDPConnection *>(data);
-    std::cout << "port: " << con->getPortNum() << std::endl;
+    port               = con->getPortNum(); // store this for later signalling.
+    /* For some reason the UDP packet handling isn't fully set up at this time. We need another
+       pass through the event loop for that or the packet is never read even though it arrives
+       on the port (as reported by ss --udp --numeric --all).
+    */
+    eventProcessor.schedule_in(this, 1, ET_UDP);
     break;
   }
 
@@ -97,8 +103,14 @@ EchoServer::handle_packet(int event, void *data)
     std::exit(EXIT_FAILURE);
   }
 
+  case EVENT_INTERVAL:
+    // Done with the extra event loop; signal the client to start.
+    std::cout << "Echo Server port: " << port << std::endl;
+    write(pfd[1], &port, sizeof(port));
+    break;
+
   default:
-    std::cout << "got unknown event" << std::endl;
+    std::cout << "got unknown event [" << event << "]" << std::endl;
     std::exit(EXIT_FAILURE);
   }
 
@@ -131,7 +143,7 @@ udp_echo_server()
   signal(SIGTERM, signal_handler);
 
   EchoServer server;
-  eventProcessor.schedule_imm(&server, ET_UDP);
+  eventProcessor.schedule_in(&server, 1, ET_UDP);
 
   this_thread()->execute();
 }
@@ -146,7 +158,7 @@ udp_client(char *buf)
   }
 
   struct timeval tv;
-  tv.tv_sec  = 1;
+  tv.tv_sec  = 20;
   tv.tv_usec = 0;
 
   setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, sizeof(tv));
@@ -180,14 +192,25 @@ REGRESSION_TEST(UDPNet_echo)(RegressionTest *t, int /* atype ATS_UNUSED */, int
   box         = REGRESSION_TEST_PASSED;
   char buf[8] = {0};
 
+  int z = pipe(pfd);
+  if (z < 0) {
+    std::cout << "Unable to create pipe" << std::endl;
+    std::exit(EXIT_FAILURE);
+  }
+
   pid_t pid = fork();
   if (pid < 0) {
     std::cout << "Couldn't fork" << std::endl;
     std::exit(EXIT_FAILURE);
   } else if (pid == 0) {
+    close(pfd[0]);
     udp_echo_server();
   } else {
-    sleep(1);
+    close(pfd[1]);
+    if (read(pfd[0], &port, sizeof(port)) <= 0) {
+      std::cout << "Failed to get signal with port data [" << errno << ']' << std::endl;
+      std::exit(EXIT_FAILURE);
+    }
     udp_client(buf);
 
     kill(pid, SIGTERM);
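
The test now forks the echo server, lets it bind to a transient port, and hands that port back to the client over a pipe instead of hard-coding 4443. A stripped-down, runnable sketch of the same fork-plus-pipe handoff (the port value 4443 below is only a placeholder for whatever the kernel assigns in the real test):

    #include <netinet/in.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstdlib>

    int main()
    {
      int pfd[2];
      if (pipe(pfd) < 0) {
        std::perror("pipe");
        return EXIT_FAILURE;
      }

      pid_t pid = fork();
      if (pid < 0) {
        std::perror("fork");
        return EXIT_FAILURE;
      }

      if (pid == 0) {          // child plays the server role
        close(pfd[0]);
        in_port_t port = 4443; // placeholder: the real test reports the port UDPBind picked
        write(pfd[1], &port, sizeof(port));
        _exit(EXIT_SUCCESS);
      }

      close(pfd[1]);           // parent (the client) blocks until the server reports its port
      in_port_t port = 0;
      if (read(pfd[0], &port, sizeof(port)) <= 0) {
        std::perror("read");
        return EXIT_FAILURE;
      }
      std::printf("server is listening on port %d\n", port);
      waitpid(pid, nullptr, 0);
      return EXIT_SUCCESS;
    }
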
diff --git a/iocore/utils/I_Machine.h b/iocore/utils/I_Machine.h
index d9296c1..ba6adc3 100644
--- a/iocore/utils/I_Machine.h
+++ b/iocore/utils/I_Machine.h
@@ -33,6 +33,7 @@
 
 #include "ts/ink_inet.h"
 #include "ts/ink_uuid.h"
+#include "ts/ink_hash_table.h"
 
 /**
   The Machine is a simple place holder for the hostname and the ip
@@ -79,11 +80,18 @@ struct Machine {
                     );
   /// @return The global instance of this class.
   static self *instance();
+  bool is_self(const char *name);
+  bool is_self(const IpAddr *ipaddr);
+  void insert_id(char *id);
+  void insert_id(IpAddr *ipaddr);
 
 protected:
   Machine(char const *hostname, sockaddr const *addr);
 
   static self *_instance; ///< Singleton for the class.
+
+  InkHashTable *machine_id_strings;
+  InkHashTable *machine_id_ipaddrs;
 };
 
 #endif
diff --git a/iocore/utils/Machine.cc b/iocore/utils/Machine.cc
index 09bf84c..82e71cb 100644
--- a/iocore/utils/Machine.cc
+++ b/iocore/utils/Machine.cc
@@ -31,6 +31,22 @@
 #include <ifaddrs.h>
 #endif
 
+static void
+make_to_lower_case(const char *name, char *lower_case_name, int buf_len)
+{
+  int name_len = strlen(name);
+  int i;
+
+  if (name_len > (buf_len - 1)) {
+    name_len = buf_len - 1;
+  }
+
+  for (i = 0; i < name_len; i++) {
+    lower_case_name[i] = ParseRules::ink_tolower(name[i]);
+  }
+  lower_case_name[i] = '\0';
+}
+
 // Singleton
 Machine *Machine::_instance = nullptr;
 
@@ -50,9 +66,15 @@ Machine::init(char const *name, sockaddr const *ip)
 }
 
 Machine::Machine(char const *the_hostname, sockaddr const *addr)
-  : hostname(nullptr), hostname_len(0), ip_string_len(0), ip_hex_string_len(0)
+  : hostname(nullptr),
+    hostname_len(0),
+    ip_string_len(0),
+    ip_hex_string_len(0),
+    machine_id_strings(ink_hash_table_create(InkHashTableKeyType_String)),
+    machine_id_ipaddrs(ink_hash_table_create(InkHashTableKeyType_String))
 {
   char localhost[1024];
+  char ip_strbuf[INET6_ADDRSTRLEN];
   int status; // return for system calls.
 
   ip_string[0]     = 0;
@@ -148,15 +170,26 @@ Machine::Machine(char const *the_hostname, sockaddr const *addr)
           continue; // Next!
         }
 
-        if (ats_is_ip4(ifip)) {
-          if (spot_type > ip4_type) {
-            ats_ip_copy(&ip4, ifip);
-            ip4_type = spot_type;
+        if (ats_is_ip4(ifip) || ats_is_ip6(ifip)) {
+          ink_zero(ip_strbuf);
+          ink_zero(localhost);
+          ats_ip_ntop(ifip, ip_strbuf, sizeof(ip_strbuf));
+          insert_id(ip_strbuf);
+          if (getnameinfo(ifip, ats_ip_size(ifip), localhost, sizeof(localhost) - 1, nullptr, 0, 0) == 0) {
+            insert_id(localhost);
           }
-        } else if (ats_is_ip6(ifip)) {
-          if (spot_type > ip6_type) {
-            ats_ip_copy(&ip6, ifip);
-            ip6_type = spot_type;
+          IpAddr *ipaddr = new IpAddr(ifip);
+          insert_id(ipaddr);
+          if (ats_is_ip4(ifip)) {
+            if (spot_type > ip4_type) {
+              ats_ip_copy(&ip4, ifip);
+              ip4_type = spot_type;
+            }
+          } else if (ats_is_ip6(ifip)) {
+            if (spot_type > ip6_type) {
+              ats_ip_copy(&ip6, ifip);
+              ip6_type = spot_type;
+            }
           }
         }
       }
@@ -203,4 +236,78 @@ Machine::Machine(char const *the_hostname, sockaddr const *addr)
 Machine::~Machine()
 {
   ats_free(hostname);
+
+  // release machine_id_strings hash table.
+  InkHashTableIteratorState ht_iter;
+  InkHashTableEntry *ht_entry = nullptr;
+  ht_entry                    = ink_hash_table_iterator_first(machine_id_strings, &ht_iter);
+
+  while (ht_entry != nullptr) {
+    char *value = static_cast<char *>(ink_hash_table_entry_value(machine_id_strings, ht_entry));
+    ats_free(value);
+    ht_entry = ink_hash_table_iterator_next(machine_id_strings, &ht_iter);
+  }
+  ink_hash_table_destroy(machine_id_strings);
+
+  // release machine_id_ipaddrs hash table.
+  ht_entry = nullptr;
+  ht_entry = ink_hash_table_iterator_first(machine_id_ipaddrs, &ht_iter);
+  while (ht_entry != nullptr) {
+    IpAddr *ipaddr = static_cast<IpAddr *>(ink_hash_table_entry_value(machine_id_ipaddrs, ht_entry));
+    delete ipaddr;
+    ht_entry = ink_hash_table_iterator_next(machine_id_ipaddrs, &ht_iter);
+  }
+  ink_hash_table_destroy(machine_id_ipaddrs);
+}
+
+bool
+Machine::is_self(const char *name)
+{
+  char lower_case_name[TS_MAX_HOST_NAME_LEN + 1] = {0};
+  void *value                                    = nullptr;
+
+  if (name == nullptr) {
+    return false;
+  }
+
+  make_to_lower_case(name, lower_case_name, sizeof(lower_case_name));
+
+  return ink_hash_table_lookup(machine_id_strings, lower_case_name, &value) == 1 ? true : false;
+}
+
+bool
+Machine::is_self(const IpAddr *ipaddr)
+{
+  void *value                             = nullptr;
+  char string_value[INET6_ADDRSTRLEN + 1] = {0};
+
+  if (ipaddr == nullptr) {
+    return false;
+  }
+  ipaddr->toString(string_value, sizeof(string_value));
+  return ink_hash_table_lookup(machine_id_ipaddrs, string_value, &value) == 1 ? true : false;
+}
+
+void
+Machine::insert_id(char *id)
+{
+  char lower_case_name[TS_MAX_HOST_NAME_LEN + 1] = {0};
+  char *value                                    = nullptr;
+
+  make_to_lower_case(id, lower_case_name, sizeof(lower_case_name));
+  value = ats_strndup(lower_case_name, strlen(lower_case_name));
+  ink_hash_table_insert(machine_id_strings, lower_case_name, value);
+}
+
+void
+Machine::insert_id(IpAddr *ipaddr)
+{
+  int length = INET6_ADDRSTRLEN + 1;
+
+  if (ipaddr != nullptr) {
+    char *string_value = static_cast<char *>(ats_calloc(length, 1));
+    ipaddr->toString(string_value, length);
+    ink_hash_table_insert(machine_id_strings, string_value, string_value);
+    ink_hash_table_insert(machine_id_ipaddrs, string_value, ipaddr);
+  }
 }
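
Machine now keeps hash tables of its own names and addresses so is_self() can answer membership queries; keys are lower-cased on insert and on lookup, which is what makes the check case-insensitive. A self-contained model of that idea using std::unordered_set in place of InkHashTable (MachineIds and to_lower are illustrative stand-ins, not the real API):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <unordered_set>

    static std::string
    to_lower(std::string s)
    {
      std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
      return s;
    }

    struct MachineIds {
      std::unordered_set<std::string> ids;

      void insert_id(const std::string &id) { ids.insert(to_lower(id)); }                    // keys stored lower-cased
      bool is_self(const std::string &name) const { return ids.count(to_lower(name)) != 0; } // lookup lower-cased too
    };

    int main()
    {
      MachineIds m;
      m.insert_id("Proxy01.Example.COM");
      m.insert_id("192.0.2.1");
      std::cout << m.is_self("proxy01.example.com") << ' ' << m.is_self("10.0.0.1") << '\n'; // prints "1 0"
      return 0;
    }
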
diff --git a/lib/cppapi/Transaction.cc b/lib/cppapi/Transaction.cc
index a2b7f66..afc2019 100644
--- a/lib/cppapi/Transaction.cc
+++ b/lib/cppapi/Transaction.cc
@@ -397,9 +397,9 @@ Transaction::getCacheStatus()
 void
 Transaction::redirectTo(std::string const &url)
 {
-  char *s = ats_strdup(url.c_str());
+  std::string s = url;
   // Must re-alloc the string locally because ownership is transferred to the transaction.
-  TSHttpTxnRedirectUrlSet(state_->txn_, s, url.length());
+  TSHttpTxnRedirectUrlSet(state_->txn_, s.c_str(), url.length());
 }
 
 namespace
diff --git a/lib/perl/Makefile.am b/lib/perl/Makefile.am
index 2b97bb3..591ee2b 100644
--- a/lib/perl/Makefile.am
+++ b/lib/perl/Makefile.am
@@ -27,10 +27,13 @@ install-exec-local: Makefile-pl
 
 # The perl build needs to have the source files in the current working directory, so we need to
 # copy them to the build directory if we are building out of tree.
-Makefile-pl: Makefile.PL
+Makefile-pl: Makefile.PL $(top_builddir)/config.status
 	test -f "$(top_builddir)/$(subdir)/Makefile.PL" || cp -rf "$(srcdir)/." "$(top_builddir)/$(subdir)/"
 	$(PERL) Makefile.PL INSTALLDIRS=$(INSTALLDIRS) INSTALL_BASE=$(prefix) PREFIX=
 
+clean-local:
+	-rm Makefile-pl
+
 distclean-local:
 	-rm -rf Makefile-pl MYMETA.* blip
 
diff --git a/lib/records/I_RecCore.h b/lib/records/I_RecCore.h
index da904c7..ff39339 100644
--- a/lib/records/I_RecCore.h
+++ b/lib/records/I_RecCore.h
@@ -49,40 +49,33 @@ void RecConfigFileInit(void);
 int RecConfigFileParse(const char *path, RecConfigEntryCallback handler, bool inc_version);
 
 // Return a copy of the system's configuration directory, taking proxy.config.config_dir into account. The
-// caller MUST release the result with ats_free().
-char *RecConfigReadConfigDir();
+std::string RecConfigReadConfigDir();
 
 // Return a copy of the system's local state directory, taking proxy.config.local_state_dir into account. The
-// caller MUST release the result with ats_free().
-char *RecConfigReadRuntimeDir();
+std::string RecConfigReadRuntimeDir();
 
 // Return a copy of the system's snapshot directory, taking proxy.config.snapshot_dir into account. The caller
-// MUST release the result with ats_free().
-char *RecConfigReadSnapshotDir();
+std::string RecConfigReadSnapshotDir();
 
 // Return a copy of the system's log directory, taking proxy.config.log.logfile_dir into account. The caller
-// MUST release the result with ats_free().
-char *RecConfigReadLogDir();
+std::string RecConfigReadLogDir();
 
 // Return a copy of the system's bin directory, taking proxy.config.bin_path into account. The caller MUST
-// release the result with ats_free().
-char *RecConfigReadBinDir();
+std::string RecConfigReadBinDir();
 
 // Return a copy of the system's plugin directory, taking proxy.config.plugin.plugin_dir into account. The caller MUST
-// release the result with ats_free().
-char *RecConfigReadPluginDir();
+std::string RecConfigReadPluginDir();
 
 // Return a copy of a configuration file that is relative to sysconfdir. The relative path to the configuration
 // file is specified in the configuration variable named by "file_variable". If the configuration variable has no
-// value, nullptr is returned. The caller MUST release the result with ats_free().
-char *RecConfigReadConfigPath(const char *file_variable, const char *default_value = nullptr);
+// value, nullptr is returned.
+std::string RecConfigReadConfigPath(const char *file_variable, const char *default_value = nullptr);
 
 // This is the same as RecConfigReadConfigPath, except it makes the paths relative to $PREFIX.
-char *RecConfigReadPrefixPath(const char *file_variable, const char *default_value = nullptr);
+std::string RecConfigReadPrefixPath(const char *file_variable, const char *default_value = nullptr);
 
 // Return a copy of the persistent stats file. This is $RUNTIMEDIR/records.snap.
-// The caller MUST release the result with ats_free().
-char *RecConfigReadPersistentStatsPath();
+std::string RecConfigReadPersistentStatsPath();
 
 // Test whether the named configuration value is overridden by an environment variable. Return either
 // the overridden value, or the original value. Caller MUST NOT free the result.
diff --git a/lib/records/RecCore.cc b/lib/records/RecCore.cc
index ed8632e..5069782 100644
--- a/lib/records/RecCore.cc
+++ b/lib/records/RecCore.cc
@@ -216,10 +216,10 @@ RecCoreInit(RecModeT mode_type, Diags *_diags)
 
     ink_mutex_init(&g_rec_config_lock);
 
-    g_rec_config_fpath = RecConfigReadConfigPath(nullptr, REC_CONFIG_FILE REC_SHADOW_EXT);
+    g_rec_config_fpath = ats_stringdup(RecConfigReadConfigPath(nullptr, REC_CONFIG_FILE REC_SHADOW_EXT));
     if (RecFileExists(g_rec_config_fpath) == REC_ERR_FAIL) {
       ats_free((char *)g_rec_config_fpath);
-      g_rec_config_fpath = RecConfigReadConfigPath(nullptr, REC_CONFIG_FILE);
+      g_rec_config_fpath = ats_stringdup(RecConfigReadConfigPath(nullptr, REC_CONFIG_FILE));
       if (RecFileExists(g_rec_config_fpath) == REC_ERR_FAIL) {
         RecLog(DL_Warning, "Could not find '%s', system will run with defaults\n", REC_CONFIG_FILE);
         file_exists = false;
@@ -1124,7 +1124,7 @@ REC_readString(const char *name, bool *found, bool lock)
 // overrides specially here. Normally we would override the configuration
 // variable when we read records.config but to avoid the bootstrapping
 // problem, we make an explicit check here.
-char *
+std::string
 RecConfigReadConfigDir()
 {
   char buf[PATH_NAME_MAX] = {0};
@@ -1138,14 +1138,14 @@ RecConfigReadConfigDir()
   if (strlen(buf) > 0) {
     return Layout::get()->relative(buf);
   } else {
-    return ats_strdup(Layout::get()->sysconfdir);
+    return Layout::get()->sysconfdir;
   }
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadRuntimeDir
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadRuntimeDir()
 {
   char buf[PATH_NAME_MAX];
@@ -1155,14 +1155,14 @@ RecConfigReadRuntimeDir()
   if (strlen(buf) > 0) {
     return Layout::get()->relative(buf);
   } else {
-    return ats_strdup(Layout::get()->runtimedir);
+    return Layout::get()->runtimedir;
   }
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadLogDir
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadLogDir()
 {
   char buf[PATH_NAME_MAX];
@@ -1172,14 +1172,14 @@ RecConfigReadLogDir()
   if (strlen(buf) > 0) {
     return Layout::get()->relative(buf);
   } else {
-    return ats_strdup(Layout::get()->logdir);
+    return Layout::get()->logdir;
   }
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadBinDir
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadBinDir()
 {
   char buf[PATH_NAME_MAX];
@@ -1189,14 +1189,14 @@ RecConfigReadBinDir()
   if (strlen(buf) > 0) {
     return Layout::get()->relative(buf);
   } else {
-    return ats_strdup(Layout::get()->bindir);
+    return Layout::get()->bindir;
   }
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadPluginDir
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadPluginDir()
 {
   return RecConfigReadPrefixPath("proxy.config.plugin.plugin_dir");
@@ -1205,7 +1205,7 @@ RecConfigReadPluginDir()
 //-------------------------------------------------------------------------
 // RecConfigReadSnapshotDir.
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadSnapshotDir()
 {
   return RecConfigReadConfigPath("proxy.config.snapshot_dir", "snapshots");
@@ -1214,10 +1214,10 @@ RecConfigReadSnapshotDir()
 //-------------------------------------------------------------------------
 // RecConfigReadConfigPath
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadConfigPath(const char *file_variable, const char *default_value)
 {
-  ats_scoped_str sysconfdir(RecConfigReadConfigDir());
+  std::string sysconfdir(RecConfigReadConfigDir());
 
   // If the file name is in a configuration variable, look it up first ...
   if (file_variable) {
@@ -1235,13 +1235,13 @@ RecConfigReadConfigPath(const char *file_variable, const char *default_value)
     return Layout::get()->relative_to(sysconfdir, default_value);
   }
 
-  return nullptr;
+  return {};
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadPrefixPath
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadPrefixPath(const char *file_variable, const char *default_value)
 {
   char buf[PATH_NAME_MAX];
@@ -1260,16 +1260,16 @@ RecConfigReadPrefixPath(const char *file_variable, const char *default_value)
     return Layout::get()->relative_to(Layout::get()->prefix, default_value);
   }
 
-  return nullptr;
+  return {};
 }
 
 //-------------------------------------------------------------------------
 // RecConfigReadPersistentStatsPath
 //-------------------------------------------------------------------------
-char *
+std::string
 RecConfigReadPersistentStatsPath()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
+  std::string rundir(RecConfigReadRuntimeDir());
   return Layout::relative_to(rundir, REC_RAW_STATS_FILE);
 }
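
The RecConfigRead* family now returns std::string, so callers no longer own a heap buffer they must hand back to ats_free(). A small stand-alone sketch of the before/after calling convention (the function names and path literal below are made up for illustration):

    #include <cstdlib>
    #include <cstring>
    #include <iostream>
    #include <string>

    // Old convention: the caller owns a heap copy and must release it.
    char *
    old_read_config_dir()
    {
      return strdup("/usr/local/etc/trafficserver");
    }

    // New convention: ownership travels with the std::string, nothing to free by hand.
    std::string
    new_read_config_dir()
    {
      return "/usr/local/etc/trafficserver";
    }

    int main()
    {
      char *dir = old_read_config_dir();
      std::cout << dir << '\n';
      free(dir); // easy to forget; leaks quietly if you do

      std::string dir2 = new_read_config_dir();
      std::cout << dir2 << '\n'; // cleaned up automatically when dir2 goes out of scope
      return 0;
    }
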
 
diff --git a/lib/records/RecHttp.cc b/lib/records/RecHttp.cc
index 225db94..9f4eadf 100644
--- a/lib/records/RecHttp.cc
+++ b/lib/records/RecHttp.cc
@@ -631,7 +631,7 @@ HttpProxyPort::print(char *out, size_t n)
     }
   }
 
-  return min(zret, n);
+  return std::min(zret, n);
 }
 
 void
diff --git a/lib/ts/BufferWriter.h b/lib/ts/BufferWriter.h
new file mode 100644
index 0000000..43872e6
--- /dev/null
+++ b/lib/ts/BufferWriter.h
@@ -0,0 +1,336 @@
+#if !defined TS_BUFFERWRITER_H_
+#define TS_BUFFERWRITER_H_
+
+/** @file
+
+    Utilities for generating character sequences.
+
+    @section license License
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ */
+
+#include <utility>
+#include <cstring>
+
+#include <ts/string_view.h>
+#include <ts/ink_assert.h>
+
+namespace ts
+{
+// Abstract class.
+//
+class BufferWriter
+{
+public:
+  // The write() functions "add" characters at the end.  If these functions discard any characters, this must put the instance
+  // in an error state (indicated by the override of error() ).  Derived classes must not assume the write() functions will
+  // not be called when the instance is in an error state.
+
+  virtual BufferWriter &write(char c) = 0;
+
+  virtual BufferWriter &
+  write(const void *data, size_t length)
+  {
+    const char *d = static_cast<const char *>(data);
+
+    while (length--) {
+      write(*(d++));
+    }
+    return *this;
+  }
+
+  BufferWriter &
+  write(const string_view &sV)
+  {
+    return write(sV.data(), sV.size());
+  }
+
+  /// Return the written buffer.
+  virtual const char *data() const = 0;
+
+  // Returns true if the instance is in an error state.
+  //
+  virtual bool error() const = 0;
+
+  // Returns pointer to an auxiliary buffer (or nullptr if none is available).  Succeeding calls to non-const member functions,
+  // other than auxBuffer(), must be presumed to invalidate the current auxiliary buffer (contents and address).  Results
+  // are UNDEFINED if character locations at or beyond auxBuffer()[remaining()] are written.
+  //
+  virtual char *
+  auxBuffer()
+  {
+    return nullptr;
+  }
+
+  // Write the first n characters that have been placed in the auxiliary buffer.  This call invalidates the auxiliary buffer.
+  // This function should not be called if no auxiliary buffer is available.
+  //
+  virtual BufferWriter &
+  write(size_t n)
+  {
+    return *this;
+  }
+
+  // Returns the total number of characters that can be written without causing an error condition.
+  //
+  virtual size_t capacity() const = 0;
+
+  // Total number of characters that have been written, including those discarded due to an error condition.
+  //
+  virtual size_t extent() const = 0;
+
+  // Total number of characters that are in the buffer (successfully written and not discarded).
+  //
+  size_t
+  size() const
+  {
+    size_t e = extent(), c = capacity();
+
+    return e < c ? e : c;
+  }
+
+  // Returns the number of additional characters that can be written without causing an error condition.
+  //
+  size_t
+  remaining() const
+  {
+    return capacity() - size();
+  }
+
+  // Reduce the capacity by n characters, potentially creating an error condition.
+  //
+  virtual BufferWriter &clip(size_t n) = 0;
+
+  // If there is an error condition, this function clears it and sets the extent to the size.  It then increases the
+  // capacity by n characters.
+  //
+  virtual BufferWriter &extend(size_t n) = 0;
+
+  // Make destructor virtual.
+  //
+  virtual ~BufferWriter() {}
+};
+
+// A buffer writer that writes to an array of char that is external to the writer instance.
+//
+class FixedBufferWriter : public BufferWriter
+{
+protected:
+  FixedBufferWriter(char *buf, size_t capacity, size_t attempted) : _buf(buf), _capacity(capacity), _attempted(attempted) {}
+
+public:
+  // 'buf' is a pointer to the external array of char to write to.  'capacity' is the number of bytes in the array.
+  //
+  // If you create an instance of this class with capacity == 0 (and a nullptr buffer), you can use it to measure the number of
+  // characters a series of writes would result in (from the extent() value) without actually writing.
+  //
+  FixedBufferWriter(char *buf, size_t capacity) : FixedBufferWriter(buf, capacity, 0) {}
+
+  FixedBufferWriter &
+  write(char c) override
+  {
+    if (_attempted < _capacity) {
+      _buf[_attempted] = c;
+    }
+    ++_attempted;
+
+    return *this;
+  }
+
+  FixedBufferWriter &
+  write(const void *data, size_t length) override
+  {
+    size_t newSize = _attempted + length;
+
+    if (newSize <= _capacity) {
+      std::memcpy(_buf + _attempted, data, length);
+
+    } else if (_attempted < _capacity) {
+      std::memcpy(_buf + _attempted, data, _capacity - _attempted);
+    }
+    _attempted = newSize;
+
+    return *this;
+  }
+
+  // It's not clear to me why g++ needs this using declaration in order to consider the inherited versions of 'write' when
+  // resolving calls to a 'write' member ( wkaras@oath.com ).
+  //
+  using BufferWriter::write;
+
+  /// Return the written buffer.
+  const char *
+  data() const override
+  {
+    return _buf;
+  }
+
+  bool
+  error() const override
+  {
+    return _attempted > _capacity;
+  }
+
+  char *
+  auxBuffer() override
+  {
+    return error() ? nullptr : _buf + _attempted;
+  }
+
+  FixedBufferWriter &
+  write(size_t n) override
+  {
+    _attempted += n;
+
+    return *this;
+  }
+
+  size_t
+  capacity() const override
+  {
+    return _capacity;
+  }
+
+  size_t
+  extent() const override
+  {
+    return _attempted;
+  }
+
+  FixedBufferWriter &
+  clip(size_t n) override
+  {
+    ink_assert(n <= _capacity);
+
+    _capacity -= n;
+
+    return *this;
+  }
+
+  FixedBufferWriter &
+  extend(size_t n) override
+  {
+    if (error()) {
+      _attempted = _capacity;
+    }
+
+    _capacity += n;
+
+    return *this;
+  }
+
+  // Reduce extent.  If extent is less than capacity, error condition is cleared.
+  //
+  void
+  reduce(size_t smallerExtent)
+  {
+    ink_assert(smallerExtent <= _attempted);
+
+    _attempted = smallerExtent;
+  }
+
+  // Provide a string_view of all successfully written characters.
+  //
+  string_view
+  view() const
+  {
+    return string_view(_buf, size());
+  }
+
+  operator string_view() const { return view(); }
+
+  // No copying
+  //
+  FixedBufferWriter(const FixedBufferWriter &) = delete;
+  FixedBufferWriter &operator=(const FixedBufferWriter &) = delete;
+
+  // Moving is OK.
+  //
+  FixedBufferWriter(FixedBufferWriter &&) = default;
+  FixedBufferWriter &operator=(FixedBufferWriter &&) = default;
+
+protected:
+  char *const _buf;
+
+  size_t _capacity;
+
+  size_t _attempted; // Number of characters written, including those discarded due to an error condition.
+};
+
+// A buffer writer that writes to an array of char (of fixed dimension N) that is internal to the writer instance.
+// It's called 'local' because instances are typically declared as stack-allocated, local function variables.
+//
+template <size_t N> class LocalBufferWriter : public FixedBufferWriter
+{
+public:
+  LocalBufferWriter() : FixedBufferWriter(_arr, N) {}
+
+  LocalBufferWriter(const LocalBufferWriter &that) : FixedBufferWriter(_arr, that._capacity, that._attempted)
+  {
+    std::memcpy(_arr, that._arr, size());
+  }
+
+  LocalBufferWriter &
+  operator=(const LocalBufferWriter &that)
+  {
+    if (this != &that) {
+      _capacity = that._capacity;
+
+      _attempted = that._attempted;
+
+      std::memcpy(_buf, that._buf, size());
+    }
+
+    return *this;
+  }
+
+  LocalBufferWriter &
+  extend(size_t n) override
+  {
+    if (error()) {
+      _attempted = _capacity;
+    }
+
+    _capacity += n;
+
+    ink_assert(_capacity < N);
+
+    return *this;
+  }
+
+  // Move construction/assignment intentionally defaulted to copying.
+
+protected:
+  char _arr[N];
+};
+
+inline BufferWriter &
+operator<<(BufferWriter &b, char c)
+{
+  return b.write(c);
+}
+
+inline BufferWriter &
+operator<<(BufferWriter &b, const string_view &sv)
+{
+  return b.write(sv);
+}
+
+} // end namespace ts
+
+#endif // include once
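
A short usage sketch of the new classes, assuming the header is on the include path as ts/BufferWriter.h; the strings written are arbitrary sample data:

    #include "ts/BufferWriter.h"
    #include <cstdio>

    int main()
    {
      ts::LocalBufferWriter<64> bw; // the 64-byte array lives inside the writer itself
      bw.write("GET ", 4);
      bw << ts::string_view("/index.html", 11) << ' ';
      bw.write("HTTP/1.1", 8);

      if (!bw.error()) {                 // error() would be true only if we overflowed 64 bytes
        ts::string_view out = bw.view(); // just the successfully written characters
        std::printf("%.*s\n", static_cast<int>(out.size()), out.data());
      }
      return 0;
    }
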
diff --git a/lib/ts/Diags.cc b/lib/ts/Diags.cc
index b0c1c82..70095d1 100644
--- a/lib/ts/Diags.cc
+++ b/lib/ts/Diags.cc
@@ -743,13 +743,13 @@ Diags::should_roll_outputlog()
         if (stdout_log->roll()) {
           char *oldname = ats_strdup(stdout_log->get_name());
           log_log_trace("in %s(), oldname=%s\n", __func__, oldname);
-          set_stdout_output(oldname);
+          set_std_output(StdStream::STDOUT, oldname);
 
           // if stderr and stdout are redirected to the same place, we should
           // update the stderr_log object as well
           if (!strcmp(oldname, stderr_log->get_name())) {
             log_log_trace("oldname == stderr_log->get_name()\n");
-            set_stderr_output(oldname);
+            set_std_output(StdStream::STDERR, oldname);
             need_consider_stderr = false;
           }
           ats_free(oldname);
@@ -773,13 +773,13 @@ Diags::should_roll_outputlog()
           outputlog_time_last_roll = now;
           char *oldname            = ats_strdup(stdout_log->get_name());
           log_log_trace("in %s, oldname=%s\n", __func__, oldname);
-          set_stdout_output(oldname);
+          set_std_output(StdStream::STDOUT, oldname);
 
           // if stderr and stdout are redirected to the same place, we should
           // update the stderr_log object as well
           if (!strcmp(oldname, stderr_log->get_name())) {
             log_log_trace("oldname == stderr_log->get_name()\n");
-            set_stderr_output(oldname);
+            set_std_output(StdStream::STDERR, oldname);
             need_consider_stderr = false;
           }
           ats_free(oldname);
@@ -806,105 +806,64 @@ Diags::should_roll_outputlog()
 }
 
 /*
- * Binds stdout to stdout_path, provided that stdout_path != "".
- * Also sets up a BaseLogFile for stdout.
+ * Sets up a BaseLogFile for the specified file. Then it binds the specified standard stream
+ * to the aforementioned BaseLogFile.
  *
- * Returns true on binding and setup, false otherwise
- *
- * TODO make this a generic function (ie combine set_stdout_output and
- * set_stderr_output
+ * Returns true on successful binding and setup, false otherwise
  */
 bool
-Diags::set_stdout_output(const char *stdout_path)
+Diags::set_std_output(StdStream stream, const char *file)
 {
-  if (strcmp(stdout_path, "") == 0) {
-    return false;
-  }
+  const char *target_stream;
+  BaseLogFile **current;
+  BaseLogFile *old_log, *new_log;
 
-  BaseLogFile *old_stdout_log = stdout_log;
-  BaseLogFile *new_stdout_log = new BaseLogFile(stdout_path);
-
-  // on any errors we quit
-  if (!new_stdout_log || new_stdout_log->open_file(output_logfile_perm) != BaseLogFile::LOG_FILE_NO_ERROR) {
-    log_log_error("[Warning]: unable to open file=%s to bind stdout to\n", stdout_path);
-    log_log_error("[Warning]: stdout is currently not bound to anything\n");
-    delete new_stdout_log;
-    lock();
-    stdout_log = nullptr;
-    unlock();
-    return false;
-  }
-  if (!new_stdout_log->is_open()) {
-    log_log_error("[Warning]: file pointer for stdout %s = nullptr\n", stdout_path);
-    log_log_error("[Warning]: stdout is currently not bound to anything\n");
-    delete new_stdout_log;
-    lock();
-    stdout_log = nullptr;
-    unlock();
+  // If the caller is stupid, we give up
+  if (strcmp(file, "") == 0) {
     return false;
   }
 
-  // now exchange the stdout_log pointer
-  lock();
-  stdout_log = new_stdout_log;
-  bool ret   = rebind_stdout(fileno(new_stdout_log->m_fp));
-  unlock();
-
-  if (old_stdout_log) {
-    delete old_stdout_log;
-  }
-
-  // "this should never happen"^{TM}
-  ink_release_assert(ret);
-
-  return ret;
-}
-
-/*
- * Binds stderr to stderr_path, provided that stderr_path != "".
- * Also sets up a BaseLogFile for stderr.
- *
- * Returns true on binding and setup, false otherwise
- */
-bool
-Diags::set_stderr_output(const char *stderr_path)
-{
-  if (strcmp(stderr_path, "") == 0) {
-    return false;
-  }
-
-  BaseLogFile *old_stderr_log = stderr_log;
-  BaseLogFile *new_stderr_log = new BaseLogFile(stderr_path);
-
-  // on any errors we quit
-  if (!new_stderr_log || new_stderr_log->open_file(output_logfile_perm) != BaseLogFile::LOG_FILE_NO_ERROR) {
-    log_log_error("[Warning]: unable to open file=%s to bind stderr to\n", stderr_path);
-    log_log_error("[Warning]: stderr is currently not bound to anything\n");
-    delete new_stderr_log;
+  // Figure out which standard stream we want to redirect
+  if (stream == StdStream::STDOUT) {
+    target_stream = "stdout";
+    current       = &stdout_log;
+  } else {
+    target_stream = "stderr";
+    current       = &stderr_log;
+  }
+  (void)target_stream; // silence clang-analyzer for now
+  old_log = *current;
+  new_log = new BaseLogFile(file);
+
+  // On any errors we quit
+  if (!new_log || new_log->open_file(output_logfile_perm) != BaseLogFile::LOG_FILE_NO_ERROR) {
+    log_log_error("[Warning]: unable to open file=%s to bind %s to\n", file, target_stream);
+    log_log_error("[Warning]: %s is currently not bound to anything\n", target_stream);
+    delete new_log;
     lock();
-    stderr_log = nullptr;
+    *current = nullptr;
     unlock();
     return false;
   }
-  if (!new_stderr_log->is_open()) {
-    log_log_error("[Warning]: file pointer for stderr %s = nullptr\n", stderr_path);
-    log_log_error("[Warning]: stderr is currently not bound to anything\n");
-    delete new_stderr_log;
+  if (!new_log->is_open()) {
+    log_log_error("[Warning]: file pointer for %s %s = nullptr\n", target_stream, file);
+    log_log_error("[Warning]: %s is currently not bound to anything\n", target_stream);
+    delete new_log;
     lock();
-    stderr_log = nullptr;
+    *current = nullptr;
     unlock();
     return false;
   }
 
-  // now exchange the stderr_log pointer
+  // Now exchange the pointer to the standard stream in question
   lock();
-  stderr_log = new_stderr_log;
-  bool ret   = rebind_stderr(fileno(stderr_log->m_fp));
+  *current = new_log;
+  bool ret = rebind_std_stream(stream, fileno(new_log->m_fp));
   unlock();
 
-  if (old_stderr_log) {
-    delete old_stderr_log;
-  }
+  // Free the BaseLogFile we rotated out
+  if (old_log)
+    delete old_log;
 
   // "this should never happen"^{TM}
   ink_release_assert(ret);
@@ -913,34 +872,30 @@ Diags::set_stderr_output(const char *stderr_path)
 }
 
 /*
- * Helper function that rebinds stdout to specified file descriptor
+ * Helper function that rebinds a specified stream to a specified file descriptor
  *
  * Returns true on success, false otherwise
  */
 bool
-Diags::rebind_stdout(int new_fd)
+Diags::rebind_std_stream(StdStream stream, int new_fd)
 {
-  if (new_fd < 0) {
-    log_log_error("[Warning]: TS unable to bind stdout to new file descriptor=%d", new_fd);
+  const char *target_stream;
+  int stream_fd;
+
+  // Figure out which stream to dup2
+  if (stream == StdStream::STDOUT) {
+    target_stream = "stdout";
+    stream_fd     = STDOUT_FILENO;
   } else {
-    dup2(new_fd, STDOUT_FILENO);
-    return true;
+    target_stream = "stderr";
+    stream_fd     = STDERR_FILENO;
   }
-  return false;
-}
+  (void)target_stream; // silence clang-analyzer for now
 
-/*
- * Helper function that rebinds stderr to specified file descriptor
- *
- * Returns true on success, false otherwise
- */
-bool
-Diags::rebind_stderr(int new_fd)
-{
-  if (new_fd < 0) {
-    log_log_error("[Warning]: TS unable to bind stderr to new file descriptor=%d", new_fd);
-  } else {
-    dup2(new_fd, STDERR_FILENO);
+  if (new_fd < 0)
+    log_log_error("[Warning]: TS unable to bind %s to new file descriptor=%d", target_stream, new_fd);
+  else {
+    dup2(new_fd, stream_fd);
     return true;
   }
   return false;
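
The two hunks above collapse the stdout/stderr variants into a single set_std_output(StdStream, file) / rebind_std_stream(StdStream, fd) pair built on dup2(). A minimal sketch of how a caller might drive the merged API (member names taken from the diff; the global `diags` pointer and log paths are illustrative assumptions):

    // Assumes a constructed Diags instance reachable as `diags`.
    if (!diags->set_std_output(StdStream::STDOUT, "/var/log/trafficserver/stdout.log")) {
      // On failure the stored BaseLogFile pointer is reset to nullptr and false is returned.
    }
    if (!diags->set_std_output(StdStream::STDERR, "/var/log/trafficserver/stderr.log")) {
      // Same handling for stderr.
    }
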
diff --git a/lib/ts/Diags.h b/lib/ts/Diags.h
index f055d01..6fbea8f 100644
--- a/lib/ts/Diags.h
+++ b/lib/ts/Diags.h
@@ -49,10 +49,10 @@
 class Diags;
 
 // extern int diags_on_for_plugins;
-typedef enum {
+enum DiagsTagType {
   DiagsTagType_Debug  = 0, // do not renumber --- used as array index
   DiagsTagType_Action = 1
-} DiagsTagType;
+};
 
 struct DiagsModeOutput {
   bool to_stdout;
@@ -61,18 +61,20 @@ struct DiagsModeOutput {
   bool to_diagslog;
 };
 
-typedef enum {  // do not renumber --- used as array index
-  DL_Diag = 0,  // process does not die
-  DL_Debug,     // process does not die
-  DL_Status,    // process does not die
-  DL_Note,      // process does not die
-  DL_Warning,   // process does not die
-  DL_Error,     // process does not die
-  DL_Fatal,     // causes process termination
-  DL_Alert,     // causes process termination
-  DL_Emergency, // causes process termination
-  DL_Undefined  // must be last, used for size!
-} DiagsLevel;
+enum DiagsLevel { // do not renumber --- used as array index
+  DL_Diag = 0,    // process does not die
+  DL_Debug,       // process does not die
+  DL_Status,      // process does not die
+  DL_Note,        // process does not die
+  DL_Warning,     // process does not die
+  DL_Error,       // process does not die
+  DL_Fatal,       // causes process termination
+  DL_Alert,       // causes process termination
+  DL_Emergency,   // causes process termination
+  DL_Undefined    // must be last, used for size!
+};
+
+enum StdStream { STDOUT = 0, STDERR };
 
 enum RollingEnabledValues { NO_ROLLING = 0, ROLL_ON_TIME, ROLL_ON_SIZE, ROLL_ON_TIME_OR_SIZE, INVALID_ROLLING_VALUE };
 
@@ -215,8 +217,7 @@ public:
   bool should_roll_diagslog();
   bool should_roll_outputlog();
 
-  bool set_stdout_output(const char *_bind_stdout);
-  bool set_stderr_output(const char *_bind_stderr);
+  bool set_std_output(StdStream stream, const char *file);
 
   const char *base_debug_tags;  // internal copy of default debug tags
   const char *base_action_tags; // internal copy of default action tags
@@ -240,8 +241,7 @@ private:
   time_t outputlog_time_last_roll;
   time_t diagslog_time_last_roll;
 
-  bool rebind_stdout(int new_fd);
-  bool rebind_stderr(int new_fd);
+  bool rebind_std_stream(StdStream stream, int new_fd);
 
   void
   lock() const
diff --git a/lib/ts/HashSip.cc b/lib/ts/HashSip.cc
index 4543b4a..6e64298 100644
--- a/lib/ts/HashSip.cc
+++ b/lib/ts/HashSip.cc
@@ -45,7 +45,7 @@ ATSHash64Sip24::ATSHash64Sip24(const unsigned char key[16]) : k0(U8TO64_LE(key))
   this->clear();
 }
 
-ATSHash64Sip24::ATSHash64Sip24(uint64_t key0, uint64_t key1)
+ATSHash64Sip24::ATSHash64Sip24(uint64_t key0, uint64_t key1) : k0(key0), k1(key1)
 {
   this->clear();
 }
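
The one-line HashSip.cc change fixes a constructor that never initialized its k0/k1 key members. A standalone illustration of the pattern (hypothetical type, not the ATS class):

    #include <cstdint>
    #include <cassert>

    struct Keyed {
      uint64_t k0, k1;
      // Without the ": k0(key0), k1(key1)" initializer list the members are
      // left indeterminate, which is exactly the bug the diff fixes.
      Keyed(uint64_t key0, uint64_t key1) : k0(key0), k1(key1) {}
    };

    int main()
    {
      Keyed k(1, 2);
      assert(k.k0 == 1 && k.k1 == 2);
      return 0;
    }
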
diff --git a/lib/ts/I_Layout.h b/lib/ts/I_Layout.h
index dd6d47f..b742340 100644
--- a/lib/ts/I_Layout.h
+++ b/lib/ts/I_Layout.h
@@ -31,50 +31,50 @@
 #ifndef _I_Layout_h
 #define _I_Layout_h
 
+// use std string and string view for layout
+#include <string>
+#include "ts/string_view.h"
+
 /**
   The Layout is a simple place holder for the distribution layout.
 
  */
 struct Layout {
-  Layout(const char *prefix = 0);
+  Layout(ts::string_view const _prefix = {});
   ~Layout();
 
   /**
-   Return file path relative to Layout->prefix
-   Memory is allocated, so use ats_free() when no longer needed
+   Return whether a runroot environment is in use; if so, load the layout paths from it.
 
   */
-  char *relative(const char *file);
+  bool check_runroot();
 
   /**
-   update the sysconfdir to a test conf dir
-   */
-  void update_sysconfdir(const char *dir);
+   Return file path relative to Layout->prefix
+
+  */
+  std::string relative(ts::string_view file);
 
   /**
-   Return file path relative to Layout->prefix
-   Store the path to buf. The buf should be large eough to store
-   PATH_NAME_MAX characters
+   update the sysconfdir to a test conf dir
 
    */
-  void relative(char *buf, size_t bufsz, const char *file);
+  void update_sysconfdir(ts::string_view dir);
 
   /**
    Return file path relative to dir
-   Memory is allocated, so use ats_free() when no longer needed
    Example usage: Layout::relative_to(default_layout()->sysconfdir, "foo.bar");
 
   */
-  static char *relative_to(const char *dir, const char *file);
+  static std::string relative_to(ts::string_view dir, ts::string_view file);
 
   /**
    Return file path relative to dir
    Store the path to buf. The buf should be large eough to store
-   PATH_NAME_MAX characters
    Example usage: Layout::relative_to(default_layout()->sysconfdir, "foo.bar");
 
   */
-  static void relative_to(char *buf, size_t bufsz, const char *dir, const char *file);
+  static void relative_to(char *buf, size_t bufsz, ts::string_view dir, ts::string_view file);
 
   /**
    Creates a Layout Object with the given prefix.  If no
@@ -82,7 +82,7 @@ struct Layout {
    at the compile time.
 
   */
-  static void create(const char *prefix = 0);
+  static void create(ts::string_view const prefix = {});
 
   /**
    Returns the Layout object created by create_default_layout().
@@ -90,21 +90,21 @@ struct Layout {
   */
   static Layout *get();
 
-  char *prefix        = nullptr;
-  char *exec_prefix   = nullptr;
-  char *bindir        = nullptr;
-  char *sbindir       = nullptr;
-  char *sysconfdir    = nullptr;
-  char *datadir       = nullptr;
-  char *includedir    = nullptr;
-  char *libdir        = nullptr;
-  char *libexecdir    = nullptr;
-  char *localstatedir = nullptr;
-  char *runtimedir    = nullptr;
-  char *logdir        = nullptr;
-  char *mandir        = nullptr;
-  char *infodir       = nullptr;
-  char *cachedir      = nullptr;
+  std::string prefix;
+  std::string exec_prefix;
+  std::string bindir;
+  std::string sbindir;
+  std::string sysconfdir;
+  std::string datadir;
+  std::string includedir;
+  std::string libdir;
+  std::string libexecdir;
+  std::string localstatedir;
+  std::string runtimedir;
+  std::string logdir;
+  std::string mandir;
+  std::string infodir;
+  std::string cachedir;
 };
 
 #endif
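
With the switch to std::string, callers no longer ats_free() the results. A short sketch against the declarations above (the file names passed in are illustrative only):

    #include "ts/I_Layout.h"

    void
    example()
    {
      Layout::create(); // falls back to the compile-time prefix when no argument is given
      std::string etc   = Layout::get()->relative("etc/trafficserver");
      std::string remap = Layout::relative_to(Layout::get()->sysconfdir, "remap.config");
      // Both strings own their memory; no ats_free() is required anymore.
    }
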
diff --git a/lib/ts/IpMap.h b/lib/ts/IpMap.h
index d3500d6..95439ab 100644
--- a/lib/ts/IpMap.h
+++ b/lib/ts/IpMap.h
@@ -191,7 +191,7 @@ public:
   */
   self &mark(sockaddr const *min, ///< Minimum value in range.
              sockaddr const *max, ///< Maximum value in range.
-             void *data = 0       ///< Client data payload.
+             void *data = nullptr ///< Client data payload.
              );
 
   /** Mark a range.
@@ -199,9 +199,9 @@ public:
       @note Convenience overload for IPv4 addresses.
       @return This object.
   */
-  self &mark(in_addr_t min, ///< Minimum address (network order).
-             in_addr_t max, ///< Maximum address (network order).
-             void *data = 0 ///< Client data.
+  self &mark(in_addr_t min,       ///< Minimum address (network order).
+             in_addr_t max,       ///< Maximum address (network order).
+             void *data = nullptr ///< Client data.
              );
 
   /** Mark a range.
@@ -209,9 +209,9 @@ public:
       @note Convenience overload for IPv4 addresses.
       @return This object.
   */
-  self &mark(IpAddr const &min, ///< Minimum address (network order).
-             IpAddr const &max, ///< Maximum address (network order).
-             void *data = 0     ///< Client data.
+  self &mark(IpAddr const &min,   ///< Minimum address (network order).
+             IpAddr const &max,   ///< Maximum address (network order).
+             void *data = nullptr ///< Client data.
              );
 
   /** Mark an IPv4 address @a addr with @a data.
@@ -219,8 +219,8 @@ public:
       @note Convenience overload for IPv4 addresses.
       @return This object.
   */
-  self &mark(in_addr_t addr, ///< Address (network order).
-             void *data = 0  ///< Client data.
+  self &mark(in_addr_t addr,      ///< Address (network order).
+             void *data = nullptr ///< Client data.
              );
 
   /** Mark a range.
@@ -230,7 +230,7 @@ public:
   */
   self &mark(IpEndpoint const *min, ///< Minimum address (network order).
              IpEndpoint const *max, ///< Maximum address (network order).
-             void *data = 0         ///< Client data.
+             void *data = nullptr   ///< Client data.
              );
 
   /** Mark an address @a addr with @a data.
@@ -239,7 +239,7 @@ public:
       @return This object.
   */
   self &mark(IpEndpoint const *addr, ///< Address (network order).
-             void *data = 0          ///< Client data.
+             void *data = nullptr    ///< Client data.
              );
 
   /** Unmark addresses.
@@ -270,11 +270,11 @@ public:
 
       @return This object.
   */
-  self &fill(sockaddr const *min, sockaddr const *max, void *data = 0);
+  self &fill(sockaddr const *min, sockaddr const *max, void *data = nullptr);
   /// Fill addresses (overload).
-  self &fill(IpEndpoint const *min, IpEndpoint const *max, void *data = 0);
+  self &fill(IpEndpoint const *min, IpEndpoint const *max, void *data = nullptr);
   /// Fill addresses (overload).
-  self &fill(in_addr_t min, in_addr_t max, void *data = 0);
+  self &fill(in_addr_t min, in_addr_t max, void *data = nullptr);
 
   /** Test for membership.
 
@@ -283,7 +283,7 @@ public:
       is set to the client data for the address.
   */
   bool contains(sockaddr const *target, ///< Search target (network order).
-                void **ptr = 0          ///< Client data return.
+                void **ptr = nullptr    ///< Client data return.
                 ) const;
 
   /** Test for membership.
@@ -294,8 +294,8 @@ public:
       If the address is in the map and @a ptr is not @c nullptr, @c *ptr
       is set to the client data for the address.
   */
-  bool contains(in_addr_t target, ///< Search target (network order).
-                void **ptr = 0    ///< Client data return.
+  bool contains(in_addr_t target,    ///< Search target (network order).
+                void **ptr = nullptr ///< Client data return.
                 ) const;
 
   /** Test for membership.
@@ -307,7 +307,7 @@ public:
       is set to the client data for the address.
   */
   bool contains(IpEndpoint const *target, ///< Search target (network order).
-                void **ptr = 0            ///< Client data return.
+                void **ptr = nullptr      ///< Client data return.
                 ) const;
 
   /** Test for membership.
@@ -319,7 +319,7 @@ public:
       is set to the client data for the address.
   */
   bool contains(IpAddr const &target, ///< Search target (network order).
-                void **ptr = 0        ///< Client data return.
+                void **ptr = nullptr  ///< Client data return.
                 ) const;
 
   /** Remove all addresses from the map.
diff --git a/lib/ts/IpMapTest.cc b/lib/ts/IpMapTest.cc
deleted file mode 100644
index 948ecd2..0000000
--- a/lib/ts/IpMapTest.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-/** @file
-
-    A brief file description
-
-    @section license License
-
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-*/
-
-#include <ts/IpMap.h>
-#include <ts/TestBox.h>
-
-void
-IpMapTestPrint(IpMap &map)
-{
-  printf("IpMap Dump\n");
-  for (IpMap::iterator spot(map.begin()), limit(map.end()); spot != limit; ++spot) {
-    ip_text_buffer ipb1, ipb2;
-
-    printf("%s - %s : %p\n", ats_ip_ntop(spot->min(), ipb1, sizeof ipb1), ats_ip_ntop(spot->max(), ipb2, sizeof(ipb2)),
-           spot->data());
-  }
-  printf("\n");
-}
-
-REGRESSION_TEST(IpMap_Basic)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  TestBox tb(t, pstatus);
-
-  IpMap map;
-  void *const markA = reinterpret_cast<void *>(1);
-  void *const markB = reinterpret_cast<void *>(2);
-  void *const markC = reinterpret_cast<void *>(3);
-  void *mark; // for retrieval
-
-  in_addr_t ip5 = htonl(5), ip9 = htonl(9);
-  in_addr_t ip10 = htonl(10), ip15 = htonl(15), ip20 = htonl(20);
-  in_addr_t ip50 = htonl(50), ip60 = htonl(60);
-  in_addr_t ip100 = htonl(100), ip120 = htonl(120), ip140 = htonl(140);
-  in_addr_t ip150 = htonl(150), ip160 = htonl(160);
-  in_addr_t ip200 = htonl(200);
-  in_addr_t ip0   = 0;
-  in_addr_t ipmax = ~static_cast<in_addr_t>(0);
-
-  *pstatus = REGRESSION_TEST_PASSED;
-
-  map.mark(ip10, ip20, markA);
-  map.mark(ip5, ip9, markA);
-  tb.check(map.getCount() == 1, "Coalesce failed");
-  tb.check(map.contains(ip9), "Range max not found.");
-  tb.check(map.contains(ip10, &mark), "Span min not found.");
-  tb.check(mark == markA, "Mark not preserved.");
-
-  map.fill(ip15, ip100, markB);
-  tb.check(map.getCount() == 2, "Fill failed.");
-  tb.check(map.contains(ip50, &mark), "Fill interior missing.");
-  tb.check(mark == markB, "Fill mark not preserved.");
-  tb.check(!map.contains(ip200), "Span min not found.");
-  tb.check(map.contains(ip15, &mark), "Old span interior not found.");
-  tb.check(mark == markA, "Fill overwrote mark.");
-
-  map.clear();
-  tb.check(map.getCount() == 0, "Clear failed.");
-
-  map.mark(ip20, ip50, markA);
-  map.mark(ip100, ip150, markB);
-  map.fill(ip10, ip200, markC);
-  tb.check(map.getCount() == 5, "Test 3 failed [expected 5, got %d].", map.getCount());
-  tb.check(map.contains(ip15, &mark), "Test 3 - left span missing.");
-  tb.check(map.contains(ip60, &mark), "Test 3 - middle span missing.");
-  tb.check(mark == markC, "Test 3 - fill mark wrong.");
-  tb.check(map.contains(ip160), "Test 3 - right span missing.");
-  tb.check(map.contains(ip120, &mark), "Test 3 - right mark span missing.");
-  tb.check(mark == markB, "Test 3 - wrong data on right mark span.");
-  map.unmark(ip140, ip160);
-  tb.check(map.getCount() == 5, "Test 3 unmark failed [expected 5, got %d].", map.getCount());
-  tb.check(!map.contains(ip140), "Test 3 - unmark left edge still there.");
-  tb.check(!map.contains(ip150), "Test 3 - unmark middle still there.");
-  tb.check(!map.contains(ip160), "Test 3 - unmark right edge still there.");
-
-  map.clear();
-  map.mark(ip20, ip20, markA);
-  tb.check(map.contains(ip20), "Map failed on singleton insert");
-  map.mark(ip10, ip200, markB);
-  mark = 0;
-  map.contains(ip20, &mark);
-  tb.check(mark == markB, "Map held singleton against range.");
-  map.mark(ip100, ip120, markA);
-  map.mark(ip150, ip160, markB);
-  map.mark(ip0, ipmax, markC);
-  tb.check(map.getCount() == 1, "IpMap: Full range fill left extra ranges.");
-}
-
-REGRESSION_TEST(IpMap_Unmark)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  TestBox tb(t, pstatus);
-  IpMap map;
-  //  ip_text_buffer ipb1, ipb2;
-  void *const markA = reinterpret_cast<void *>(1);
-
-  IpEndpoint a_0, a_0_0_0_16, a_0_0_0_17, a_max;
-  IpEndpoint a_10_28_56_0, a_10_28_56_4, a_10_28_56_255;
-  IpEndpoint a_10_28_55_255, a_10_28_57_0;
-  IpEndpoint a_63_128_1_12;
-  IpEndpoint a_loopback, a_loopback2;
-  IpEndpoint a6_0, a6_max, a6_fe80_9d90, a6_fe80_9d9d, a6_fe80_9d95;
-
-  ats_ip_pton("0.0.0.0", &a_0);
-  ats_ip_pton("0.0.0.16", &a_0_0_0_16);
-  ats_ip_pton("0.0.0.17", &a_0_0_0_17);
-  ats_ip_pton("255.255.255.255", &a_max);
-  ats_ip_pton("10.28.55.255", &a_10_28_55_255);
-  ats_ip_pton("10.28.56.0", &a_10_28_56_0);
-  ats_ip_pton("10.28.56.4", &a_10_28_56_4);
-  ats_ip_pton("10.28.56.255", &a_10_28_56_255);
-  ats_ip_pton("10.28.57.0", &a_10_28_57_0);
-  ats_ip_pton("63.128.1.12", &a_63_128_1_12);
-  ats_ip_pton("::", &a6_0);
-  ats_ip_pton("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", &a6_max);
-  ats_ip_pton("fe80::221:9bff:fe10:9d90", &a6_fe80_9d90);
-  ats_ip_pton("fe80::221:9bff:fe10:9d9d", &a6_fe80_9d9d);
-  ats_ip_pton("fe80::221:9bff:fe10:9d95", &a6_fe80_9d95);
-  ats_ip_pton("127.0.0.1", &a_loopback);
-  ats_ip_pton("127.0.0.255", &a_loopback2);
-  *pstatus = REGRESSION_TEST_PASSED;
-
-  map.mark(&a_0, &a_max, markA);
-  tb.check(map.getCount() == 1, "IpMap Unmark: Full range not single.");
-  map.unmark(&a_10_28_56_0, &a_10_28_56_255);
-  tb.check(map.getCount() == 2, "IpMap Unmark: Range unmark failed.");
-  // Generic range check.
-  tb.check(!map.contains(&a_10_28_56_0), "IpMap Unmark: Range unmark min address not removed.");
-  tb.check(!map.contains(&a_10_28_56_255), "IpMap Unmark: Range unmark max address not removed.");
-  tb.check(map.contains(&a_10_28_55_255), "IpMap Unmark: Range unmark min-1 address removed.");
-  tb.check(map.contains(&a_10_28_57_0), "IpMap Unmark: Range unmark max+1 address removed.");
-  // Test min bounded range.
-  map.unmark(&a_0, &a_0_0_0_16);
-  tb.check(!map.contains(&a_0), "IpMap Unmark: Range unmark zero address not removed.");
-  tb.check(!map.contains(&a_0_0_0_16), "IpMap Unmark: Range unmark zero bounded range max not removed.");
-  tb.check(map.contains(&a_0_0_0_17), "IpMap Unmark: Range unmark zero bounded range max+1 removed.");
-}
-
-REGRESSION_TEST(IpMap_Fill)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  TestBox tb(t, pstatus);
-  IpMap map;
-  ip_text_buffer ipb1, ipb2;
-  void *const allow = reinterpret_cast<void *>(0);
-  void *const deny  = reinterpret_cast<void *>(~0);
-  void *const markA = reinterpret_cast<void *>(1);
-  void *const markB = reinterpret_cast<void *>(2);
-  void *const markC = reinterpret_cast<void *>(3);
-  void *mark; // for retrieval
-
-  IpEndpoint a0, a_10_28_56_0, a_10_28_56_255, a3, a4;
-  IpEndpoint a_9_255_255_255, a_10_0_0_0, a_10_0_0_19, a_10_0_0_255, a_10_0_1_0;
-  IpEndpoint a_10_28_56_4, a_max, a_loopback, a_loopback2;
-  IpEndpoint a_10_28_55_255, a_10_28_57_0;
-  IpEndpoint a_63_128_1_12;
-  IpEndpoint a_0000_0000, a_0000_0001, a_ffff_ffff;
-  IpEndpoint a_fe80_9d8f, a_fe80_9d90, a_fe80_9d95, a_fe80_9d9d, a_fe80_9d9e;
-
-  *pstatus = REGRESSION_TEST_PASSED;
-
-  ats_ip_pton("0.0.0.0", &a0);
-  ats_ip_pton("255.255.255.255", &a_max);
-
-  ats_ip_pton("9.255.255.255", &a_9_255_255_255);
-  ats_ip_pton("10.0.0.0", &a_10_0_0_0);
-  ats_ip_pton("10.0.0.19", &a_10_0_0_19);
-  ats_ip_pton("10.0.0.255", &a_10_0_0_255);
-  ats_ip_pton("10.0.1.0", &a_10_0_1_0);
-
-  ats_ip_pton("10.28.55.255", &a_10_28_55_255);
-  ats_ip_pton("10.28.56.0", &a_10_28_56_0);
-  ats_ip_pton("10.28.56.4", &a_10_28_56_4);
-  ats_ip_pton("10.28.56.255", &a_10_28_56_255);
-  ats_ip_pton("10.28.57.0", &a_10_28_57_0);
-
-  ats_ip_pton("192.168.1.0", &a3);
-  ats_ip_pton("192.168.1.255", &a4);
-
-  ats_ip_pton("::", &a_0000_0000);
-  ats_ip_pton("::1", &a_0000_0001);
-  ats_ip_pton("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", &a_ffff_ffff);
-  ats_ip_pton("fe80::221:9bff:fe10:9d8f", &a_fe80_9d8f);
-  ats_ip_pton("fe80::221:9bff:fe10:9d90", &a_fe80_9d90);
-  ats_ip_pton("fe80::221:9bff:fe10:9d95", &a_fe80_9d95);
-  ats_ip_pton("fe80::221:9bff:fe10:9d9d", &a_fe80_9d9d);
-  ats_ip_pton("fe80::221:9bff:fe10:9d9e", &a_fe80_9d9e);
-
-  ats_ip_pton("127.0.0.0", &a_loopback);
-  ats_ip_pton("127.0.0.255", &a_loopback2);
-  ats_ip_pton("63.128.1.12", &a_63_128_1_12);
-
-  map.fill(&a_10_28_56_0, &a_10_28_56_255, deny);
-  map.fill(&a0, &a_max, allow);
-
-  tb.check(map.contains(&a_10_28_56_4, &mark), "IpMap Fill: Target not found.");
-  tb.check(mark == deny, "IpMap Fill: Expected deny, got allow at %s.", ats_ip_ntop(&a_10_28_56_4, ipb1, sizeof(ipb1)));
-
-  map.clear();
-  map.fill(&a_loopback, &a_loopback, allow);
-  tb.check(map.contains(&a_loopback), "IpMap fill: singleton not marked.");
-  map.fill(&a0, &a_max, deny);
-
-  mark = 0;
-  tb.check(map.contains(&a_loopback, &mark), "IpMap fill: singleton marking lost.");
-  tb.check(mark == allow, "IpMap fill: overwrote existing singleton mark.");
-  if (tb.check(map.begin() != map.end(), "IpMap fill: map is empty.")) {
-    if (tb.check(++(map.begin()) != map.end(), "IpMap fill: only one range.")) {
-      tb.check(-1 == ats_ip_addr_cmp(map.begin()->max(), (++map.begin())->min()), "IpMap fill: ranges not disjoint [%s < %s].",
-               ats_ip_ntop(map.begin()->max(), ipb1, sizeof(ipb1)), ats_ip_ntop((++map.begin())->min(), ipb2, sizeof(ipb2)));
-    }
-  }
-
-  map.clear();
-  map.fill(&a_loopback, &a_loopback2, markA);
-  map.fill(&a_10_28_56_0, &a_10_28_56_255, markB);
-  tb.check(!map.contains(&a_63_128_1_12, &mark), "IpMap fill[2]: over extended range.");
-  map.fill(&a0, &a_max, markC);
-  tb.check(map.getCount() == 5, "IpMap[2]: Fill failed.");
-  if (tb.check(map.contains(&a_63_128_1_12, &mark), "IpMap fill[2]: Collapsed range.")) {
-    tb.check(mark == markC, "IpMap fill[2]: invalid mark for range gap.");
-  }
-
-  map.clear();
-  map.fill(&a_10_0_0_0, &a_10_0_0_255, allow);
-  map.fill(&a_loopback, &a_loopback2, allow);
-  tb.check(!map.contains(&a_63_128_1_12, &mark), "IpMap fill[3]: invalid mark between ranges.");
-  tb.check(map.contains(&a_10_0_0_19, &mark) && mark == allow, "IpMap fill[3]: invalid mark in lower range.");
-  map.fill(&a0, &a_max, deny);
-  if (!tb.check(map.getCount() == 5, "IpMap[3]: Wrong number of ranges."))
-    IpMapTestPrint(map);
-  if (tb.check(map.contains(&a_63_128_1_12, &mark), "IpMap fill[3]: Missing mark between ranges")) {
-    tb.check(mark == deny, "IpMap fill[3]: gap range invalidly marked");
-  }
-
-  map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
-  map.fill(&a_0000_0001, &a_0000_0001, markA);
-  map.fill(&a_0000_0000, &a_ffff_ffff, markB);
-
-  tb.check(map.contains(&a_0000_0000, &mark) && mark == markB, "IpMap Fill[v6]: Zero address has bad mark.");
-  tb.check(map.contains(&a_ffff_ffff, &mark) && mark == markB, "IpMap Fill[v6]: Max address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d90, &mark) && mark == markA, "IpMap Fill[v6]: 9d90 address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d8f, &mark) && mark == markB, "IpMap Fill[v6]: 9d8f address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d9d, &mark) && mark == markA, "IpMap Fill[v6]: 9d9d address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d9e, &mark) && mark == markB, "IpMap Fill[v6]: 9d9b address has bad mark.");
-  tb.check(map.contains(&a_0000_0001, &mark) && mark == markA, "IpMap Fill[v6]: ::1 has bad mark.");
-
-  tb.check(map.getCount() == 10, "IpMap Fill[pre-refill]: Bad range count.");
-  // These should be ignored by the map as it is completely covered for IPv6.
-  map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
-  map.fill(&a_0000_0001, &a_0000_0001, markC);
-  map.fill(&a_0000_0000, &a_ffff_ffff, markB);
-  tb.check(map.getCount() == 10, "IpMap Fill[post-refill]: Bad range count.");
-
-  map.clear();
-  map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
-  map.fill(&a_0000_0001, &a_0000_0001, markC);
-  map.fill(&a_0000_0000, &a_ffff_ffff, markB);
-  tb.check(map.contains(&a_0000_0000, &mark) && mark == markB, "IpMap Fill[v6-2]: Zero address has bad mark.");
-  tb.check(map.contains(&a_ffff_ffff, &mark) && mark == markB, "IpMap Fill[v6-2]: Max address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d90, &mark) && mark == markA, "IpMap Fill[v6-2]: 9d90 address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d8f, &mark) && mark == markB, "IpMap Fill[v6-2]: 9d8f address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d9d, &mark) && mark == markA, "IpMap Fill[v6-2]: 9d9d address has bad mark.");
-  tb.check(map.contains(&a_fe80_9d9e, &mark) && mark == markB, "IpMap Fill[v6-2]: 9d9b address has bad mark.");
-  tb.check(map.contains(&a_0000_0001, &mark) && mark == markC, "IpMap Fill[v6-2]: ::1 has bad mark.");
-}
diff --git a/lib/ts/Layout.cc b/lib/ts/Layout.cc
index 483dcf1..da52989 100644
--- a/lib/ts/Layout.cc
+++ b/lib/ts/Layout.cc
@@ -28,6 +28,9 @@
 #include "ts/ink_string.h"
 #include "ts/I_Layout.h"
 
+#include <fstream>
+#include <unordered_map>
+
 static Layout *layout = nullptr;
 
 Layout *
@@ -41,134 +44,168 @@ Layout::get()
 }
 
 void
-Layout::create(const char *prefix)
+Layout::create(ts::string_view const prefix)
 {
   if (layout == nullptr) {
     layout = new Layout(prefix);
   }
 }
 
-static char *
-layout_relative(const char *root, const char *file)
+static void
+_relative(char *path, size_t buffsz, ts::string_view root, ts::string_view file)
 {
-  char path[PATH_NAME_MAX];
-
-  if (ink_filepath_merge(path, PATH_NAME_MAX, root, file, INK_FILEPATH_TRUENAME)) {
+  if (ink_filepath_merge(path, buffsz, root.data(), file.data(), INK_FILEPATH_TRUENAME)) {
     int err = errno;
     // Log error
     if (err == EACCES) {
-      ink_error("Cannot merge path '%s' above the root '%s'\n", file, root);
+      ink_fatal("Cannot merge path '%s' above the root '%s'\n", file.data(), root.data());
     } else if (err == E2BIG) {
-      ink_error("Exceeding file name length limit of %d characters\n", PATH_NAME_MAX);
+      ink_fatal("Exceeding file name length limit of %d characters\n", PATH_NAME_MAX);
     } else {
       // TODO: Make some pretty errors.
-      ink_error("Cannot merge '%s' with '%s' error=%d\n", file, root, err);
+      ink_fatal("Cannot merge '%s' with '%s' error=%d\n", file.data(), root.data(), err);
     }
-    return nullptr;
   }
-  return ats_strdup(path);
 }
 
-char *
-Layout::relative(const char *file)
+static std::string
+layout_relative(ts::string_view root, ts::string_view file)
 {
-  return layout_relative(prefix, file);
+  char path[PATH_NAME_MAX];
+  std::string ret;
+  _relative(path, PATH_NAME_MAX, root, file);
+  ret = path;
+  return ret;
 }
 
-void
-Layout::relative(char *buf, size_t bufsz, const char *file)
+std::string
+Layout::relative(ts::string_view file)
 {
-  char path[PATH_NAME_MAX];
-
-  if (ink_filepath_merge(path, PATH_NAME_MAX, prefix, file, INK_FILEPATH_TRUENAME)) {
-    int err = errno;
-    // Log error
-    if (err == EACCES) {
-      ink_error("Cannot merge path '%s' above the root '%s'\n", file, prefix);
-    } else if (err == E2BIG) {
-      ink_error("Exceeding file name length limit of %d characters\n", PATH_NAME_MAX);
-    } else {
-      // TODO: Make some pretty errors.
-      ink_error("Cannot merge '%s' with '%s' error=%d\n", file, prefix, err);
-    }
-    return;
-  }
-  size_t path_len = strlen(path) + 1;
-  if (path_len > bufsz) {
-    ink_error("Provided buffer is too small: %zu, required %zu\n", bufsz, path_len);
-  } else {
-    ink_strlcpy(buf, path, bufsz);
-  }
+  return layout_relative(prefix, file);
 }
 
+// Update the sysconfdir member of the layout
 void
-Layout::update_sysconfdir(const char *dir)
+Layout::update_sysconfdir(ts::string_view dir)
 {
-  if (sysconfdir) {
-    ats_free(sysconfdir);
-  }
-
-  sysconfdir = ats_strdup(dir);
+  sysconfdir.assign(dir.data(), dir.size());
 }
 
-char *
-Layout::relative_to(const char *dir, const char *file)
+std::string
+Layout::relative_to(ts::string_view dir, ts::string_view file)
 {
   return layout_relative(dir, file);
 }
 
 void
-Layout::relative_to(char *buf, size_t bufsz, const char *dir, const char *file)
+Layout::relative_to(char *buf, size_t bufsz, ts::string_view dir, ts::string_view file)
 {
   char path[PATH_NAME_MAX];
 
-  if (ink_filepath_merge(path, PATH_NAME_MAX, dir, file, INK_FILEPATH_TRUENAME)) {
-    int err = errno;
-    // Log error
-    if (err == EACCES) {
-      ink_error("Cannot merge path '%s' above the root '%s'\n", file, dir);
-    } else if (err == E2BIG) {
-      ink_error("Exceeding file name length limit of %d characters\n", PATH_NAME_MAX);
-    } else {
-      // TODO: Make some pretty errors.
-      ink_error("Cannot merge '%s' with '%s' error=%d\n", file, dir, err);
-    }
-    return;
-  }
+  _relative(path, PATH_NAME_MAX, dir, file);
   size_t path_len = strlen(path) + 1;
   if (path_len > bufsz) {
-    ink_error("Provided buffer is too small: %zu, required %zu\n", bufsz, path_len);
+    ink_fatal("Provided buffer is too small: %zu, required %zu\n", bufsz, path_len);
   } else {
     ink_strlcpy(buf, path, bufsz);
   }
 }
 
-Layout::Layout(const char *_prefix)
+bool
+Layout::check_runroot()
 {
-  if (_prefix) {
-    prefix = ats_strdup(_prefix);
+  std::string yaml_path = {};
+
+  if (getenv("USING_RUNROOT") == nullptr) {
+    return false;
   } else {
-    char *env_path;
-    char path[PATH_NAME_MAX];
-    int len;
+    std::string env_path = getenv("USING_RUNROOT");
+    int len              = env_path.size();
+    if ((len + 1) > PATH_NAME_MAX) {
+      ink_fatal("TS_RUNROOT environment variable is too big: %d, max %d\n", len, PATH_NAME_MAX - 1);
+    }
+    std::ifstream file;
+    if (env_path.back() != '/') {
+      env_path.append("/");
+    }
+    yaml_path = env_path + "runroot_path.yaml";
 
-    if ((env_path = getenv("TS_ROOT"))) {
-      len = strlen(env_path);
+    file.open(yaml_path);
+    if (!file.good()) {
+      ink_warning("Bad env path, continue with default value");
+      return false;
+    }
+  }
+  std::ifstream yamlfile(yaml_path);
+  std::unordered_map<std::string, std::string> runroot_map;
+  std::string str;
+  while (std::getline(yamlfile, str)) {
+    int pos = str.find(':');
+    runroot_map[str.substr(0, pos)] = str.substr(pos + 2);
+  }
+  for (auto it : runroot_map) {
+    prefix        = runroot_map["prefix"];
+    exec_prefix   = runroot_map["exec_prefix"];
+    bindir        = runroot_map["bindir"];
+    sbindir       = runroot_map["sbindir"];
+    sysconfdir    = runroot_map["sysconfdir"];
+    datadir       = runroot_map["datadir"];
+    includedir    = runroot_map["includedir"];
+    libdir        = runroot_map["libdir"];
+    libexecdir    = runroot_map["libexecdir"];
+    localstatedir = runroot_map["localstatedir"];
+    runtimedir    = runroot_map["runtimedir"];
+    logdir        = runroot_map["logdir"];
+    mandir        = runroot_map["mandir"];
+    infodir       = runroot_map["infodir"];
+    cachedir      = runroot_map["cachedir"];
+  }
+
+  // // for yaml lib operations
+  // YAML::Node yamlfile = YAML::LoadFile(yaml_path);
+  // prefix              = yamlfile["prefix"].as<string>();
+  // exec_prefix         = yamlfile["exec_prefix"].as<string>();
+  // bindir              = yamlfile["bindir"].as<string>();
+  // sbindir             = yamlfile["sbindir"].as<string>();
+  // sysconfdir          = yamlfile["sysconfdir"].as<string>();
+  // datadir             = yamlfile["datadir"].as<string>();
+  // includedir          = yamlfile["includedir"].as<string>();
+  // libdir              = yamlfile["libdir"].as<string>();
+  // libexecdir          = yamlfile["libexecdir"].as<string>();
+  // localstatedir       = yamlfile["localstatedir"].as<string>();
+  // runtimedir          = yamlfile["runtimedir"].as<string>();
+  // logdir              = yamlfile["logdir"].as<string>();
+  // mandir              = yamlfile["mandir"].as<string>();
+  // infodir             = yamlfile["infodir"].as<string>();
+  // cachedir            = yamlfile["cachedir"].as<string>();
+  return true;
+}
+
+Layout::Layout(ts::string_view const _prefix)
+{
+  if (!_prefix.empty()) {
+    prefix.assign(_prefix.data(), _prefix.size());
+  } else {
+    std::string path;
+    int len;
+    if (check_runroot()) {
+      return;
+    }
+    if (getenv("TS_ROOT") != nullptr) {
+      std::string env_path(getenv("TS_ROOT"));
+      len = env_path.size();
       if ((len + 1) > PATH_NAME_MAX) {
-        ink_error("TS_ROOT environment variable is too big: %d, max %d\n", len, PATH_NAME_MAX - 1);
-        return;
+        ink_fatal("TS_ROOT environment variable is too big: %d, max %d\n", len, PATH_NAME_MAX - 1);
       }
-      ink_strlcpy(path, env_path, sizeof(path));
-      while (len > 1 && path[len - 1] == '/') {
-        path[len - 1] = '\0';
-        --len;
+      path = env_path;
+      while (path.back() == '/') {
+        path.pop_back();
       }
     } else {
       // Use compile time --prefix
-      ink_strlcpy(path, TS_BUILD_PREFIX, sizeof(path));
+      path = TS_BUILD_PREFIX;
     }
-
-    prefix = ats_strdup(path);
+    prefix = path;
   }
   exec_prefix   = layout_relative(prefix, TS_BUILD_EXEC_PREFIX);
   bindir        = layout_relative(prefix, TS_BUILD_BINDIR);
@@ -188,19 +225,4 @@ Layout::Layout(const char *_prefix)
 
 Layout::~Layout()
 {
-  ats_free(prefix);
-  ats_free(exec_prefix);
-  ats_free(bindir);
-  ats_free(sbindir);
-  ats_free(sysconfdir);
-  ats_free(datadir);
-  ats_free(includedir);
-  ats_free(libdir);
-  ats_free(libexecdir);
-  ats_free(localstatedir);
-  ats_free(runtimedir);
-  ats_free(logdir);
-  ats_free(mandir);
-  ats_free(infodir);
-  ats_free(cachedir);
 }
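
check_runroot() above does not use a real YAML parser; it reads runroot_path.yaml line by line and splits each line at the first ':' (skipping the following space). A sketch of a generator that produces a file in the shape that parser accepts (directory values are illustrative):

    #include <fstream>

    int main()
    {
      std::ofstream f("runroot_path.yaml");
      // One "key: value" pair per line, matching str.substr(0, pos) / str.substr(pos + 2).
      f << "prefix: /home/user/sandbox\n";
      f << "exec_prefix: /home/user/sandbox\n";
      f << "sysconfdir: /home/user/sandbox/etc/trafficserver\n";
      f << "logdir: /home/user/sandbox/var/log/trafficserver\n";
      // ... plus the remaining keys listed in the runroot.cc header comment ...
      return 0;
    }
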
diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am
index 449e03b..efd87d5 100644
--- a/lib/ts/Makefile.am
+++ b/lib/ts/Makefile.am
@@ -184,6 +184,7 @@ libtsutil_la_SOURCES = \
   SourceLocation.cc \
   SourceLocation.h \
   string_view.h \
+  BufferWriter.h \
   TestBox.h \
   TextBuffer.cc \
   TextBuffer.h \
@@ -252,8 +253,11 @@ test_tslib_CPPFLAGS = $(AM_CPPFLAGS)\
 # add you catch based test file here for tslib
 test_tslib_LDADD = libtsutil.la
 test_tslib_SOURCES = \
-	unit-tests/main.cpp \
-	unit-tests/string_view.cpp
+	unit-tests/unit_test_main.cc \
+	unit-tests/test_BufferWriter.cc \
+	unit-tests/test_IpMap.cc \
+	unit-tests/test_layout.cc \
+	unit-tests/test_string_view.cc
 
 CompileParseRules_SOURCES = CompileParseRules.cc
 
diff --git a/lib/ts/PriorityQueue.h b/lib/ts/PriorityQueue.h
index c0d2d11..06ea052 100644
--- a/lib/ts/PriorityQueue.h
+++ b/lib/ts/PriorityQueue.h
@@ -48,6 +48,7 @@ public:
   PriorityQueue() {}
   ~PriorityQueue() {}
   bool empty();
+  bool in(PriorityQueueEntry<T> *entry);
   PriorityQueueEntry<T> *top();
   void pop();
   void push(PriorityQueueEntry<T> *);
@@ -73,6 +74,13 @@ PriorityQueue<T, Comp>::dump() const
 
 template <typename T, typename Comp>
 bool
+PriorityQueue<T, Comp>::in(PriorityQueueEntry<T> *entry)
+{
+  return _v.in(entry) != NULL;
+}
+
+template <typename T, typename Comp>
+bool
 PriorityQueue<T, Comp>::empty()
 {
   return _v.length() == 0;
@@ -110,7 +118,9 @@ PriorityQueue<T, Comp>::pop()
     return;
   }
 
+  const uint32_t original_index = _v[0]->index;
   _swap(0, _v.length() - 1);
+  _v[_v.length() - 1]->index = original_index;
   _v.pop();
   _bubble_down(0);
 }
@@ -123,11 +133,19 @@ PriorityQueue<T, Comp>::erase(PriorityQueueEntry<T> *entry)
     return;
   }
 
+  // If the entry doesn't belong to this queue just return.
+  if (entry != _v[entry->index]) {
+    ink_assert(!_v.in(entry));
+    return;
+  }
+
   ink_release_assert(entry->index < _v.length());
   const uint32_t original_index = entry->index;
   if (original_index != (_v.length() - 1)) {
     // Move the erased item to the end to be popped off
     _swap(original_index, _v.length() - 1);
+    // Fix the index before we pop it
+    _v[_v.length() - 1]->index = original_index;
     _v.pop();
     _bubble_down(original_index);
     _bubble_up(original_index);
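
The new in() method and the guard in erase() make it safe to ask whether an entry belongs to a queue, and to call erase() on an entry that was never pushed. A minimal sketch (it assumes PriorityQueueEntry<T> is constructible from a value, as in the existing tests, and that the queue's default comparator is usable for int):

    PriorityQueue<int> q;
    PriorityQueueEntry<int> a(10), b(20);

    q.push(&a);

    if (!q.in(&b)) {
      q.erase(&b); // now a no-op instead of corrupting the heap
    }

    q.pop(); // removes `a`; its original index is restored before the vector pop
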
diff --git a/lib/ts/apidefs.h.in b/lib/ts/apidefs.h.in
index 79fc0f7..2a32afd 100644
--- a/lib/ts/apidefs.h.in
+++ b/lib/ts/apidefs.h.in
@@ -709,7 +709,6 @@ typedef enum {
   TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK,
   TS_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER_MARK,
   TS_CONFIG_HTTP_CACHE_RANGE_LOOKUP,
-  TS_CONFIG_HTTP_NORMALIZE_AE_GZIP,
   TS_CONFIG_HTTP_DEFAULT_BUFFER_SIZE,
   TS_CONFIG_HTTP_DEFAULT_BUFFER_WATER_MARK,
   TS_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE,
@@ -756,6 +755,8 @@ typedef enum {
   TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME,
   TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS,
   TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT,
+  TS_CONFIG_HTTP_NORMALIZE_AE,
+  TS_CONFIG_HTTP_INSERT_FORWARDED,
   TS_CONFIG_LAST_ENTRY
 } TSOverridableConfigKey;
 
@@ -800,6 +801,8 @@ typedef enum {
   TS_MILESTONE_SM_FINISH,
   TS_MILESTONE_PLUGIN_ACTIVE,
   TS_MILESTONE_PLUGIN_TOTAL,
+  TS_MILESTONE_TLS_HANDSHAKE_START,
+  TS_MILESTONE_TLS_HANDSHAKE_END,
   TS_MILESTONE_LAST_ENTRY
 } TSMilestonesType;
 
@@ -1006,6 +1009,7 @@ extern tsapi const char *TS_MIME_FIELD_WARNING;
 extern tsapi const char *TS_MIME_FIELD_WWW_AUTHENTICATE;
 extern tsapi const char *TS_MIME_FIELD_XREF;
 extern tsapi const char *TS_MIME_FIELD_X_FORWARDED_FOR;
+extern tsapi const char *TS_MIME_FIELD_FORWARDED;
 
 /* --------------------------------------------------------------------------
    MIME fields string lengths */
@@ -1081,6 +1085,7 @@ extern tsapi int TS_MIME_LEN_WARNING;
 extern tsapi int TS_MIME_LEN_WWW_AUTHENTICATE;
 extern tsapi int TS_MIME_LEN_XREF;
 extern tsapi int TS_MIME_LEN_X_FORWARDED_FOR;
+extern tsapi int TS_MIME_LEN_FORWARDED;
 
 /* --------------------------------------------------------------------------
    HTTP values */
@@ -1207,7 +1212,7 @@ typedef enum {
 } TSUuidVersion;
 
 #define TS_UUID_STRING_LEN 36
-#define TS_CRUUID_STRING_LEN (TS_UUID_STRING_LEN + 20 + 1) /* UUID-len + len(int64_t) + '-' */
+#define TS_CRUUID_STRING_LEN (TS_UUID_STRING_LEN + 19 + 1) /* UUID-len + len(uint64_t) + '-' */
 typedef struct tsapi_uuid *TSUuid;
 
 #ifdef __cplusplus
diff --git a/lib/ts/ink_args.cc b/lib/ts/ink_args.cc
index b2a61c1..4954bff 100644
--- a/lib/ts/ink_args.cc
+++ b/lib/ts/ink_args.cc
@@ -221,6 +221,9 @@ process_args_ex(const AppVersionInfo *appinfo, const ArgumentDescription *argume
     if ((*argv)[1] == '-') {
       // Deal with long options ...
       for (i = 0; i < n_argument_descriptions; i++) {
+        if (!strcmp(argument_descriptions[i].name, "run-root")) {
+          break;
+        }
         if (!strcmp(argument_descriptions[i].name, (*argv) + 2)) {
           *argv += strlen(*argv) - 1;
           if (!process_arg(appinfo, argument_descriptions, n_argument_descriptions, i, &argv)) {
diff --git a/lib/ts/ink_args.h b/lib/ts/ink_args.h
index ad7ecd4..549fcdd 100644
--- a/lib/ts/ink_args.h
+++ b/lib/ts/ink_args.h
@@ -75,6 +75,10 @@ struct ArgumentDescription {
   {                                                                          \
     "help", 'h', "Print usage information", nullptr, nullptr, nullptr, usage \
   }
+#define RUNROOT_ARGUMENT_DESCRIPTION()                                                 \
+  {                                                                                    \
+    "run-root", '-', "using TS_RUNROOT as sandbox", nullptr, nullptr, nullptr, nullptr \
+  }
 
 /* Global Data
 */
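
A sketch of how a command's argument table might pick up the new descriptor, next to the existing help entry (the surrounding options are placeholders):

    static const ArgumentDescription argument_descriptions[] = {
      // ... command-specific options ...
      HELP_ARGUMENT_DESCRIPTION(),
      RUNROOT_ARGUMENT_DESCRIPTION(),
    };
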
diff --git a/lib/ts/ink_cap.cc b/lib/ts/ink_cap.cc
index 5e70ad8..5e713f0 100644
--- a/lib/ts/ink_cap.cc
+++ b/lib/ts/ink_cap.cc
@@ -351,6 +351,17 @@ elevating_chmod(const char *path, int perm)
   return ret;
 }
 
+int
+elevating_stat(const char *path, struct stat *buff)
+{
+  int ret = stat(path, buff);
+  if (ret != 0 && (EPERM == errno || EACCES == errno)) {
+    ElevateAccess access(ElevateAccess::FILE_PRIVILEGE);
+    return stat(path, buff);
+  }
+  return ret;
+}
+
 #if TS_USE_POSIX_CAP
 /** Acquire file access privileges to bypass DAC.
     @a level is a mask of the specific file access capabilities to acquire.
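
elevating_stat() mirrors the other elevating_* wrappers: try the plain syscall first and retry under elevated file privileges only on EPERM/EACCES. A usage sketch (the path is illustrative):

    #include <sys/stat.h>

    struct stat sb;
    if (elevating_stat("/etc/trafficserver/ssl/server.pem", &sb) == 0) {
      // sb is populated even when the file is readable only with elevated privileges
    }
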
diff --git a/lib/ts/ink_cap.h b/lib/ts/ink_cap.h
index ac4aa2b..f2ebf31 100644
--- a/lib/ts/ink_cap.h
+++ b/lib/ts/ink_cap.h
@@ -53,6 +53,8 @@ extern FILE *elevating_fopen(const char *path, const char *mode);
 
 // chmod a file, elevating if necessary
 extern int elevating_chmod(const char *path, int perm);
+/// @c stat a file, elevating only if needed.
+extern int elevating_stat(const char *path, struct stat *buff);
 
 /** Control generate of core file on crash.
     @a flag sets whether core files are enabled on crash.
diff --git a/lib/ts/ink_defs.h b/lib/ts/ink_defs.h
index 0f80a81..f85589d 100644
--- a/lib/ts/ink_defs.h
+++ b/lib/ts/ink_defs.h
@@ -88,33 +88,6 @@ countof(const T (&)[N])
 #define ABS(x) (((x) < 0) ? (-(x)) : (x))
 #endif
 
-#ifndef MAX
-#define MAX(x, y) (((x) >= (y)) ? (x) : (y))
-#endif
-
-#ifndef MIN
-#define MIN(x, y) (((x) <= (y)) ? (x) : (y))
-#endif
-
-#ifdef __cplusplus
-// We can't use #define for min and max because it will conflict with
-// other declarations of min and max functions.  This conflict
-// occurs with STL
-template <class T>
-T
-min(const T a, const T b)
-{
-  return a < b ? a : b;
-}
-
-template <class T>
-T
-max(const T a, const T b)
-{
-  return a > b ? a : b;
-}
-#endif
-
 #define ATS_UNUSED __attribute__((unused))
 #define ATS_WARN_IF_UNUSED __attribute__((warn_unused_result))
 #define ATS_UNUSED_RETURN(x) \
diff --git a/lib/ts/ink_inet.cc b/lib/ts/ink_inet.cc
index 39b66c7..2467916 100644
--- a/lib/ts/ink_inet.cc
+++ b/lib/ts/ink_inet.cc
@@ -28,7 +28,7 @@
 #include "ts/ink_code.h"
 #include "ts/ink_assert.h"
 #include "ts/TestBox.h"
-#include "ts/TextBuffer.h"
+#include <fstream>
 
 IpAddr const IpAddr::INVALID;
 
@@ -650,7 +650,6 @@ REGRESSION_TEST(Ink_Inet)(RegressionTest *t, int /* atype */, int *pstatus)
 int
 ats_tcp_somaxconn()
 {
-  int fd;
   int value = 0;
 
 /* Darwin version ... */
@@ -661,20 +660,15 @@ ats_tcp_somaxconn()
   }
 #endif
 
-  fd = open("/proc/sys/net/ipv4/tcp_max_syn_backlog", O_RDONLY);
-  if (fd != -1) {
-    TextBuffer text(0);
-    text.slurp(fd);
-    if (!text.empty()) {
-      value = strtoul(text.bufPtr(), nullptr, 10);
-    }
-    close(fd);
+  std::ifstream f("/proc/sys/net/ipv4/tcp_max_syn_backlog", std::ifstream::in);
+  if (f.good()) {
+    f >> value;
   }
 
   // Default to the compatible value we used before detection. SOMAXCONN is the right
   // macro to use, but most systems set this to 128, which is just too small.
   if (value <= 0) {
-    return 1024;
+    value = 1024;
   }
 
   return value;
diff --git a/lib/ts/ink_memory.cc b/lib/ts/ink_memory.cc
index 6811c4e..9cbcbd7 100644
--- a/lib/ts/ink_memory.cc
+++ b/lib/ts/ink_memory.cc
@@ -159,9 +159,9 @@ ats_mallopt(int param ATS_UNUSED, int value ATS_UNUSED)
 #if TS_HAS_TCMALLOC
 // TODO: tcmalloc code ?
 #else
-#if defined(linux)
+#if defined(__GLIBC__)
   return mallopt(param, value);
-#endif // ! defined(linux)
+#endif // ! defined(__GLIBC__)
 #endif // ! TS_HAS_TCMALLOC
 #endif // ! TS_HAS_JEMALLOC
   return 0;
diff --git a/lib/ts/ink_memory.h b/lib/ts/ink_memory.h
index 0264f77..cd4c7a1 100644
--- a/lib/ts/ink_memory.h
+++ b/lib/ts/ink_memory.h
@@ -27,7 +27,9 @@
 #include <string.h>
 #include <strings.h>
 #include <inttypes.h>
+#include <string>
 
+#include "ts/string_view.h"
 #include "ts/ink_config.h"
 
 #ifdef __cplusplus
@@ -135,6 +137,16 @@ static inline size_t __attribute__((const)) ats_pagesize(void)
 char *_xstrdup(const char *str, int length, const char *path);
 
 #define ats_strdup(p) _xstrdup((p), -1, nullptr)
+
+// This helps with migrating to std::string in older code that expects a
+// copied char*. As more code moves to std::string, this can be removed to
+// avoid the extra copies.
+inline char *
+ats_stringdup(std::string const &p)
+{
+  return p.empty() ? nullptr : _xstrdup(p.c_str(), p.size(), nullptr);
+}
+
 #define ats_strndup(p, n) _xstrdup((p), n, nullptr)
 
 #ifdef __cplusplus
@@ -457,6 +469,22 @@ public:
   explicit ats_scoped_str(size_t n) : super(static_cast<char *>(ats_malloc(n))) {}
   /// Put string @a s in this container for cleanup.
   explicit ats_scoped_str(char *s) : super(s) {}
+  // constructor with std::string
+  explicit ats_scoped_str(const std::string &s)
+  {
+    if (s.empty())
+      _r = nullptr;
+    else
+      _r = strdup(s.c_str());
+  }
+  // constructor with string_view
+  explicit ats_scoped_str(const ts::string_view &s)
+  {
+    if (s.empty())
+      _r = nullptr;
+    else
+      _r = strdup(s.data());
+  }
   /// Assign a string @a s to this container.
   self &
   operator=(char *s)
@@ -464,6 +492,26 @@ public:
     super::operator=(s);
     return *this;
   }
+  // std::string case
+  self &
+  operator=(const std::string &s)
+  {
+    if (s.empty())
+      _r = nullptr;
+    else
+      _r = strdup(s.c_str());
+    return *this;
+  }
+  // string_view case
+  self &
+  operator=(const ts::string_view &s)
+  {
+    if (s.empty())
+      _r = nullptr;
+    else
+      _r = strdup(s.data());
+    return *this;
+  }
 };
 
 /** Specialization of @c ats_scoped_resource for pointers allocated with @c ats_malloc.
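
The new std::string overloads let callers hand a std::string to code that still traffics in C strings. A short sketch of the two paths added above:

    std::string name = "remap.config";

    ats_scoped_str copy(name);       // duplicated; released automatically when `copy` leaves scope
    char *raw = ats_stringdup(name); // duplicated via _xstrdup; pair with ats_free()
    // ... use copy and raw as plain C strings ...
    ats_free(raw);
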
diff --git a/lib/ts/ink_platform.h b/lib/ts/ink_platform.h
index 95050d6..95e3eab 100644
--- a/lib/ts/ink_platform.h
+++ b/lib/ts/ink_platform.h
@@ -27,36 +27,25 @@
 
 #include "ts/ink_config.h"
 
-#ifdef HAVE_STDLIB_H
-#include <stdlib.h>
-#endif
-#include <ctype.h>
-#ifdef HAVE_STRING_H
-#include <string.h>
-#endif
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#include <errno.h>
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_STAT_H
-#include <sys/stat.h>
+// Gnu C++ doesn't define __STDC__ == 0 as needed to
+// have ip_hl be defined.
+#if defined(__GNUC__) && !defined(__STDC__)
+#define __STDC__ 0
 #endif
-#include <fcntl.h>
 
+#include <fcntl.h>
 #include <limits.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <sys/stat.h>
 #include <assert.h>
 #include <time.h>
 #include <sys/time.h>
 #include <sys/uio.h>
 #include <sys/file.h>
 #include <sys/resource.h>
+#include <errno.h>
+#include <syslog.h>
+#include <pwd.h>
+#include <poll.h>
+#include <dirent.h>
 
 #include <sys/ipc.h>
 #include <sys/shm.h>
@@ -65,10 +54,32 @@
 #include <sys/param.h>
 #include <sys/un.h>
 
-#include <sys/wait.h>
 #include <sys/socket.h>
 #include <sys/mman.h>
+#include <sys/wait.h>
 
+struct ifafilt;
+#include <net/if.h>
+
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <ctype.h>
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
 #ifdef HAVE_NETINET_IN_H
 #include <netinet/in.h>
 #endif
@@ -101,13 +112,6 @@
 #ifdef HAVE_SIGINFO_H
 #include <siginfo.h>
 #endif
-#ifdef HAVE_WAIT_H
-#include <wait.h>
-#endif
-
-#include <syslog.h>
-#include <pwd.h>
-#include <poll.h>
 
 #if TS_USE_EPOLL
 #include <sys/epoll.h>
@@ -126,28 +130,14 @@
 #include <alloca.h>
 #endif
 
-#include <errno.h>
-#include <dirent.h>
-
 #ifdef HAVE_CPIO_H
 #include <cpio.h>
 #endif
 
-struct ifafilt;
-#include <net/if.h>
-
 #ifdef HAVE_STROPTS_H
 #include <stropts.h>
 #endif
 
-//
-// Gnu C++ doesn't define __STDC__ == 0 as needed to
-// have ip_hl be defined.
-//
-#if defined(__GNUC__) && !defined(__STDC__)
-#define __STDC__ 0
-#endif
-
 #ifdef HAVE_MACHINE_ENDIAN_H
 #include <machine/endian.h>
 #endif
@@ -165,8 +155,6 @@ struct ifafilt;
 #include <sys/sockio.h>
 #endif
 
-#include <resolv.h>
-
 #if defined(linux)
 typedef unsigned int in_addr_t;
 #endif
@@ -199,6 +187,9 @@ typedef unsigned int in_addr_t;
 #include <sys/prctl.h>
 #endif
 
+// Unconditionally included headers that depend on conditionally included ones.
+#include <resolv.h> // Must go after the netinet includes for FreeBSD
+
 #ifndef PATH_NAME_MAX
 #define PATH_NAME_MAX 4096 // instead of PATH_MAX which is inconsistent
                            // on various OSs (linux-4096,osx/bsd-1024,
diff --git a/lib/ts/ink_sock.cc b/lib/ts/ink_sock.cc
index 6f694d2..167e0ee 100644
--- a/lib/ts/ink_sock.cc
+++ b/lib/ts/ink_sock.cc
@@ -290,6 +290,11 @@ bind_unix_domain_socket(const char *path, mode_t mode)
     return sockfd;
   }
 
+  if (strlen(path) > sizeof(sockaddr.sun_path) - 1) {
+    errno = ENAMETOOLONG;
+    goto fail;
+  }
+
   ink_zero(sockaddr);
   sockaddr.sun_family = AF_UNIX;
   ink_strlcpy(sockaddr.sun_path, path, sizeof(sockaddr.sun_path));
diff --git a/lib/ts/runroot.cc b/lib/ts/runroot.cc
new file mode 100644
index 0000000..ef49ce0
--- /dev/null
+++ b/lib/ts/runroot.cc
@@ -0,0 +1,144 @@
+/** @file
+
+  A brief file description
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+/*
+This file contains the runroot handler for TS_RUNROOT.
+It handles the --run-root option for every command or program.
+
+Goal: set up an ENV variable for Layout.cc to use as TS_RUNROOT sandbox
+easy & clean
+
+Example: ./traffic_server --run-root=/path/to/sandbox
+
+Need a yaml file in the sandbox with key value pairs of all directory locations for other programs to use
+
+Directories needed in the yaml file:
+prefix, exec_prefix, includedir, localstatedir, bindir, logdir, mandir, sbindir, sysconfdir,
+datadir, libexecdir, libdir, runtimedir, infodir, cachedir.
+*/
+
+#include "ts/ink_error.h"
+
+#include <vector>
+#include <string>
+#include <fstream>
+#include <set>
+#include <unistd.h>
+
+#define MAX_CWD_LEN 1024
+
+// Check the parent paths for the runroot yaml file.
+// If found, return the parent path that contains it.
+static std::string
+check_parent_path(const std::string &path, bool json = false)
+{
+  std::string whole_path = path;
+  if (whole_path.back() == '/')
+    whole_path.pop_back();
+
+  while (whole_path != "") {
+    whole_path                   = whole_path.substr(0, whole_path.find_last_of("/"));
+    std::string parent_yaml_path = whole_path + "/runroot_path.yaml";
+    std::ifstream parent_check_file;
+    parent_check_file.open(parent_yaml_path);
+    if (parent_check_file.good()) {
+      if (!json)
+        ink_notice("using parent of bin/current working dir");
+      return whole_path;
+    }
+  }
+  return {};
+}
+
+// until I get a <filesystem> impl in
+bool
+is_directory(const char *directory)
+{
+  struct stat buffer;
+  int result = stat(directory, &buffer);
+  return (!result && (S_IFDIR & buffer.st_mode)) ? true : false;
+}
+
+// handler for ts runroot
+void
+runroot_handler(const char **argv, bool json = false)
+{
+  std::string command = {};
+  std::string arg     = {};
+  std::string prefix  = "--run-root";
+
+  int i = 0;
+  while (argv[i]) {
+    command = argv[i];
+    if (command.substr(0, prefix.size()) == prefix) {
+      arg = command;
+      break;
+    }
+    i++;
+  }
+  if (arg.empty())
+    return;
+
+  // 1. check pass in path
+  prefix += "=";
+  if (arg.substr(0, prefix.size()) == prefix) {
+    std::ifstream yaml_checkfile;
+    std::string path = arg.substr(prefix.size(), arg.size() - 1);
+
+    if (path.back() != '/')
+      path.append("/");
+
+    std::string yaml_path = path + "runroot_path.yaml";
+    yaml_checkfile.open(yaml_path);
+    if (yaml_checkfile.good()) {
+      if (!json)
+        ink_notice("using command line path as RUNROOT");
+      setenv("USING_RUNROOT", path.c_str(), true);
+      return;
+    } else {
+      if (!json)
+        ink_warning("bad RUNROOT");
+    }
+  }
+  // 2. argv provided invalid/no yaml file, then check env variable
+  char *env_val = getenv("TS_RUNROOT");
+  if ((env_val != nullptr) && is_directory(env_val)) {
+    setenv("USING_RUNROOT", env_val, true);
+    if (!json)
+      ink_notice("using the environment variable TS_RUNROOT");
+    return;
+  }
+  // 3. find parent path of bin/pwd to check
+  char cwd[MAX_CWD_LEN]      = {0};
+  char RealBinPath[PATH_MAX] = {0};
+  if ((argv[0] != nullptr) && (getcwd(cwd, sizeof(cwd)) != nullptr) && (realpath(argv[0], RealBinPath) != nullptr)) {
+    std::vector<std::string> TwoPath = {RealBinPath, cwd};
+    for (auto it : TwoPath) {
+      std::string path = check_parent_path(it);
+      if (!path.empty()) {
+        setenv("USING_RUNROOT", path.c_str(), true);
+        return;
+      }
+    }
+  }
+}
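
A sketch of how a program might wire the handler in front of layout creation; the declaration of runroot_handler() is assumed to be visible to the caller (the diff adds only the definition):

    void
    startup(const char **argv)
    {
      runroot_handler(argv); // sets USING_RUNROOT when --run-root, TS_RUNROOT, or a parent dir resolves
      Layout::create();      // Layout::check_runroot() then picks the sandbox paths up from USING_RUNROOT
    }
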
diff --git a/lib/ts/string_view.h b/lib/ts/string_view.h
index ff902bf..9535e22 100644
--- a/lib/ts/string_view.h
+++ b/lib/ts/string_view.h
@@ -33,7 +33,6 @@
 #include <utility>
 #include <string>
 #include <ostream>
-#include <ts/ink_memory.h>
 
 #if __cplusplus < 201402
 #define CONSTEXPR14 inline
@@ -241,9 +240,6 @@ public:
   // std::string constructor
   constexpr basic_string_view(std::string const &rhs) noexcept : m_data(rhs.data()), m_size(rhs.size()) {}
 
-  // ats_scoped_str constructor
-  constexpr basic_string_view(ats_scoped_str const &rhs) noexcept : m_data(rhs.get()), m_size(traits_type::length(rhs.get())) {}
-
   // For iterator on string_view we don't need to deal with const and non-const as different types
   // they are all const iterators as the string values are immutable
   // keep in mind that the string view is mutable in what it points to
@@ -1214,3 +1210,12 @@ operator<<(std::basic_ostream<_Type, _Traits> &os, const basic_string_view<_Type
 using string_view = basic_string_view<char>;
 
 } // namespace ts
+
+/// Literal suffix for string_view.
+/// @note This enables @c string_view literals from C++ string literals in @c constexpr contexts, which
+/// is not the case for the character pointer constructor.
+/// @internal This must be in the global namespace to be found.
+constexpr ts::string_view operator"" _sv(const char *str, size_t len) noexcept
+{
+  return ts::string_view(str, len);
+}
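
A small sketch of the new suffix; because the length is captured at compile time, it also works in constexpr contexts where the char-pointer constructor would not (assuming size() is constexpr, as in std::string_view):

    constexpr ts::string_view name = "trafficserver"_sv;
    static_assert(name.size() == 13, "length known at compile time");
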
diff --git a/lib/ts/test_PriorityQueue.cc b/lib/ts/test_PriorityQueue.cc
index 73b20f8..6206c3c 100644
--- a/lib/ts/test_PriorityQueue.cc
+++ b/lib/ts/test_PriorityQueue.cc
@@ -367,12 +367,26 @@ REGRESSION_TEST(PriorityQueue_6)(RegressionTest *t, int /* atype ATS_UNUSED */,
   pq->push(entry_b);
   pq->push(entry_c);
 
+  uint32_t index;
+
   box.check(pq->top() == entry_a, "top should be entry_a");
+
+  index = entry_a->index;
   pq->erase(entry_a);
+  box.check(entry_a->index == index, "index should be the same");
+
   box.check(pq->top() == entry_b, "top should be entry_b");
+
+  index = entry_c->index;
   pq->erase(entry_c);
+  box.check(entry_c->index == index, "index should be the same");
+
   box.check(pq->top() == entry_b, "top should be entry_b");
+
+  index = entry_b->index;
   pq->erase(entry_b);
+  box.check(entry_b->index == index, "index should be the same");
+
   box.check(pq->top() == nullptr, "top should be NULL");
   box.check(pq->empty(), "should be empty");
 
@@ -461,3 +475,82 @@ REGRESSION_TEST(PriorityQueue_7)(RegressionTest *t, int /* atype ATS_UNUSED */,
   delete entry_y;
   delete entry_z;
 }
+
+// Test the erase and pop methods to ensure the index entries are maintained correctly
+REGRESSION_TEST(PriorityQueue_pop_and_erase)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  PQ *pq1 = new PQ();
+  PQ *pq2 = new PQ();
+
+  N *x = new N(20, "X");
+  N *y = new N(30, "Y");
+  N *z = new N(40, "Z");
+
+  Entry *entry_x = new Entry(x);
+  Entry *entry_y = new Entry(y);
+  Entry *entry_z = new Entry(z);
+
+  pq2->push(entry_z);
+  pq2->push(entry_y);
+  pq2->push(entry_x);
+
+  x->weight = 40;
+  y->weight = 30;
+  z->weight = 20;
+
+  pq1->push(pq2->top());
+  pq2->pop();
+  box.check(pq1->top()->index == 0, "Top index should be zero, but got %d", pq1->top()->index);
+
+  pq1->push(pq2->top());
+  pq2->pop();
+  box.check(pq1->top()->index == 0, "Top index should be zero, but got %d", pq1->top()->index);
+
+  pq1->push(pq2->top());
+  pq2->pop();
+  box.check(pq1->top()->index == 0, "Top index should be zero, but got %d", pq1->top()->index);
+
+  delete pq1;
+  delete pq2;
+
+  delete x;
+  delete y;
+  delete z;
+
+  delete entry_x;
+  delete entry_y;
+  delete entry_z;
+}
+
+REGRESSION_TEST(PriorityQueue_pop_and_erase_2)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  PQ *pq1 = new PQ();
+
+  N *x = new N(20, "X");
+  N *y = new N(30, "Y");
+
+  Entry *X = new Entry(x);
+  Entry *Y = new Entry(y);
+
+  box.check(X->index == 0 && Y->index == 0, "X and Y index should be 0");
+
+  pq1->push(X);
+
+  pq1->erase(Y);
+
+  box.check(pq1->top() == X, "X should be in queue");
+
+  delete x;
+  delete y;
+
+  delete X;
+  delete Y;
+
+  delete pq1;
+}
diff --git a/lib/ts/unit-tests/test_BufferWriter.cc b/lib/ts/unit-tests/test_BufferWriter.cc
new file mode 100644
index 0000000..3bbd3cb
--- /dev/null
+++ b/lib/ts/unit-tests/test_BufferWriter.cc
@@ -0,0 +1,322 @@
+/** @file
+
+    Unit tests for BufferWriter.h.
+
+    @section license License
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ */
+
+#include "BufferWriter.h"
+
+#include "catch.hpp"
+
+#include "string_view.h"
+
+#include <cstring>
+
+namespace
+{
+ts::string_view three[] = {"a", "", "bcd"};
+}
+
+TEST_CASE("BufferWriter::write(StringView)", "[BWWSV]")
+{
+  class X : public ts::BufferWriter
+  {
+    size_t i, j;
+
+  public:
+    bool good;
+
+    X() : i(0), j(0), good(true) {}
+
+    X &
+    write(char c) override
+    {
+      while (j == three[i].size()) {
+        ++i;
+        j = 0;
+      }
+
+      if ((i >= 3) or (c != three[i][j])) {
+        good = false;
+      }
+
+      ++j;
+
+      return *this;
+    }
+
+    bool
+    error() const override
+    {
+      return false;
+    }
+
+    // Dummies.
+    const char *
+    data() const override
+    {
+      return nullptr;
+    }
+    size_t
+    capacity() const override
+    {
+      return 0;
+    }
+    size_t
+    extent() const override
+    {
+      return 0;
+    }
+    X &clip(size_t) override { return *this; }
+    X &extend(size_t) override { return *this; }
+  };
+
+  X x;
+
+  static_cast<ts::BufferWriter &>(x).write(three[0]).write(three[1]).write(three[2]);
+
+  REQUIRE(x.good);
+}
+
+namespace
+{
+template <size_t N> using LBW = ts::LocalBufferWriter<N>;
+}
+
+TEST_CASE("Minimal Local Buffer Writer", "[BWLM]")
+{
+  LBW<1> bw;
+
+  REQUIRE(!((bw.capacity() != 1) or (bw.size() != 0) or bw.error() or (bw.remaining() != 1)));
+
+  bw.write('#');
+
+  REQUIRE(!((bw.capacity() != 1) or (bw.size() != 1) or bw.error() or (bw.remaining() != 0)));
+
+  REQUIRE(bw.view() == "#");
+
+  bw.write('#');
+
+  REQUIRE(bw.error());
+
+  bw.reduce(1);
+
+  REQUIRE(!((bw.capacity() != 1) or (bw.size() != 1) or bw.error() or (bw.remaining() != 0)));
+
+  REQUIRE(bw.view() == "#");
+}
+
+namespace
+{
+template <class BWType>
+bool
+twice(BWType &bw)
+{
+  if ((bw.capacity() != 20) or (bw.size() != 0) or bw.error() or (bw.remaining() != 20)) {
+    return false;
+  }
+
+  bw.write('T');
+
+  if ((bw.capacity() != 20) or (bw.size() != 1) or bw.error() or (bw.remaining() != 19)) {
+    return false;
+  }
+
+  if (bw.view() != "T") {
+    return false;
+  }
+
+  bw.write("he").write(' ').write("quick").write(' ').write("brown");
+
+  if ((bw.capacity() != 20) or bw.error() or (bw.remaining() != (21 - sizeof("The quick brown")))) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown") {
+    return false;
+  }
+
+  bw.reduce(0);
+
+  bw << "The" << ' ' << "quick" << ' ' << "brown";
+
+  if ((bw.capacity() != 20) or bw.error() or (bw.remaining() != (21 - sizeof("The quick brown")))) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown") {
+    return false;
+  }
+
+  bw.reduce(0);
+
+  bw.write("The", 3).write(' ').write("quick", 5).write(' ').write(ts::string_view("brown", 5));
+
+  if ((bw.capacity() != 20) or bw.error() or (bw.remaining() != (21 - sizeof("The quick brown")))) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown") {
+    return false;
+  }
+
+  std::strcpy(bw.auxBuffer(), " fox");
+  bw.write(sizeof(" fox") - 1);
+
+  if (bw.error()) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown fox") {
+    return false;
+  }
+
+  bw.write('x');
+
+  if (bw.error()) {
+    return false;
+  }
+
+  bw.write('x');
+
+  if (!bw.error()) {
+    return false;
+  }
+
+  bw.write('x');
+
+  if (!bw.error()) {
+    return false;
+  }
+
+  bw.reduce(sizeof("The quick brown fox") - 1);
+
+  if (bw.error()) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown fox") {
+    return false;
+  }
+
+  bw.reduce(sizeof("The quick brown") - 1);
+  bw.clip(bw.capacity() + 2 - (sizeof("The quick brown fox") - 1)).write(" fox");
+
+  if (bw.view() != "The quick brown f") {
+    return false;
+  }
+
+  if (!bw.error()) {
+    return false;
+  }
+
+  bw.extend(2).write("ox");
+
+  if (bw.error()) {
+    return false;
+  }
+
+  if (bw.view() != "The quick brown fox") {
+    return false;
+  }
+
+  return true;
+}
+
+} // end anonymous namespace
+
+TEST_CASE("Concrete Buffer Writers 2", "[BWC2]")
+{
+  LBW<20> bw;
+
+  REQUIRE(twice(bw));
+
+  char space[21];
+
+  space[20] = '!';
+
+  ts::FixedBufferWriter fbw(space, 20);
+
+  REQUIRE(twice(fbw));
+
+  REQUIRE(space[20] == '!');
+
+  LBW<20> bw2(bw), bw3;
+
+  REQUIRE(bw2.view() == "The quick brown fox");
+
+  bw3 = bw2;
+
+  REQUIRE(bw3.view() == "The quick brown fox");
+}
+
+TEST_CASE("Discard Buffer Writer", "[BWD]")
+{
+  char scratch[1] = {'!'};
+  ts::FixedBufferWriter bw(scratch, 0);
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == 0);
+
+  bw.write('T');
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == 1);
+
+  bw.write("he").write(' ').write("quick").write(' ').write("brown");
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == (sizeof("The quick brown") - 1));
+
+  bw.reduce(0);
+
+  bw.write("The", 3).write(' ').write("quick", 5).write(' ').write(ts::string_view("brown", 5));
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == (sizeof("The quick brown") - 1));
+
+  bw.write(sizeof(" fox") - 1);
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == (sizeof("The quick brown fox") - 1));
+
+  bw.reduce(sizeof("The quick brown fox") - 1);
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == (sizeof("The quick brown fox") - 1));
+
+  bw.reduce(sizeof("The quick brown") - 1);
+
+  REQUIRE(bw.size() == 0);
+  REQUIRE(bw.extent() == (sizeof("The quick brown") - 1));
+
+  // Make sure nothing was actually written to the buffer.
+  //
+  REQUIRE(scratch[0] == '!');
+}
+
+TEST_CASE("Buffer Writer << operator", "[BW<<]")
+{
+  ts::LocalBufferWriter<50> bw;
+
+  bw << "The" << ' ' << "quick" << ' ' << "brown fox";
+
+  REQUIRE(bw.view() == "The quick brown fox");
+}
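
Illustration (not part of the patch): the pattern these tests exercise is a stack-allocated
writer that appends without allocating and flags overflow instead of overrunning the buffer;
the names and sizes below are arbitrary.

    ts::LocalBufferWriter<64> bw;
    bw << "status=" << "ok";   // stream-style append, as in the cases above
    if (!bw.error()) {
      auto out = bw.view();    // view of the bytes written so far: "status=ok"
    }
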
diff --git a/lib/ts/unit-tests/test_IpMap.cc b/lib/ts/unit-tests/test_IpMap.cc
new file mode 100644
index 0000000..3ffd66c
--- /dev/null
+++ b/lib/ts/unit-tests/test_IpMap.cc
@@ -0,0 +1,606 @@
+/** @file
+
+    IpMap unit tests.
+
+    @section license License
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include <ts/IpMap.h>
+#include <catch.hpp>
+
+std::ostream &
+operator<<(std::ostream &s, IpEndpoint const &addr)
+{
+  ip_text_buffer b;
+  ats_ip_ntop(addr, b, sizeof(b));
+  s << b;
+  return s;
+}
+
+void
+IpMapTestPrint(IpMap &map)
+{
+  printf("IpMap Dump\n");
+  for (IpMap::iterator spot(map.begin()), limit(map.end()); spot != limit; ++spot) {
+    ip_text_buffer ipb1, ipb2;
+
+    printf("%s - %s : %p\n", ats_ip_ntop(spot->min(), ipb1, sizeof ipb1), ats_ip_ntop(spot->max(), ipb2, sizeof(ipb2)),
+           spot->data());
+  }
+  printf("\n");
+}
+
+// --- Test helper classes ---
+class MapMarkedAt : public Catch::MatcherBase<IpMap>
+{
+  IpEndpoint const &_addr;
+
+public:
+  MapMarkedAt(IpEndpoint const &addr) : _addr(addr) {}
+
+  virtual bool
+  match(IpMap const &map) const override
+  {
+    return map.contains(&_addr);
+  }
+
+  virtual std::string
+  describe() const override
+  {
+    std::ostringstream ss;
+    ss << _addr << " is marked";
+    return ss.str();
+  }
+};
+
+// The builder function
+inline MapMarkedAt
+IsMarkedAt(IpEndpoint const &_addr)
+{
+  return {_addr};
+}
+
+class MapMarkedWith : public Catch::MatcherBase<IpMap>
+{
+  IpEndpoint const &_addr;
+  void *_mark;
+  mutable bool _found_p = false;
+
+public:
+  MapMarkedWith(IpEndpoint const &addr, void *mark) : _addr(addr), _mark(mark) {}
+
+  virtual bool
+  match(IpMap const &map) const override
+  {
+    void *mark = nullptr;
+    return (_found_p = map.contains(&_addr, &mark)) && mark == _mark;
+  }
+
+  virtual std::string
+  describe() const override
+  {
+    std::ostringstream ss;
+    if (_found_p) {
+      ss << "is marked at " << _addr << " with " << std::hex << reinterpret_cast<intptr_t>(_mark);
+    } else {
+      ss << "is not marked at " << _addr;
+    }
+    return ss.str();
+  }
+};
+
+inline MapMarkedWith
+IsMarkedWith(IpEndpoint const &addr, void *mark)
+{
+  return {addr, mark};
+}
+
+// -------------
+// --- TESTS ---
+// -------------
+TEST_CASE("IpMap Basic", "[libts][ipmap]")
+{
+  IpMap map;
+  void *const markA = reinterpret_cast<void *>(1);
+  void *const markB = reinterpret_cast<void *>(2);
+  void *const markC = reinterpret_cast<void *>(3);
+  void *mark; // for retrieval
+
+  in_addr_t ip5 = htonl(5), ip9 = htonl(9);
+  in_addr_t ip10 = htonl(10), ip15 = htonl(15), ip20 = htonl(20);
+  in_addr_t ip50 = htonl(50), ip60 = htonl(60);
+  in_addr_t ip100 = htonl(100), ip120 = htonl(120), ip140 = htonl(140);
+  in_addr_t ip150 = htonl(150), ip160 = htonl(160);
+  in_addr_t ip200 = htonl(200);
+  in_addr_t ip0   = 0;
+  in_addr_t ipmax = ~static_cast<in_addr_t>(0);
+
+  map.mark(ip10, ip20, markA);
+  map.mark(ip5, ip9, markA);
+  {
+    INFO("Coalesce failed");
+    CHECK(map.getCount() == 1);
+  }
+  {
+    INFO("Range max not found.");
+    CHECK(map.contains(ip9));
+  }
+  {
+    INFO("Span min not found");
+    CHECK(map.contains(ip10, &mark));
+  }
+  {
+    INFO("Mark not preserved.");
+    CHECK(mark == markA);
+  }
+
+  map.fill(ip15, ip100, markB);
+  {
+    INFO("Fill failed.");
+    CHECK(map.getCount() == 2);
+  }
+  {
+    INFO("fill interior missing");
+    CHECK(map.contains(ip50, &mark));
+  }
+  {
+    INFO("Fill mark not preserved.");
+    CHECK(mark == markB);
+  }
+  {
+    INFO("Span min not found.");
+    CHECK(!map.contains(ip200));
+  }
+  {
+    INFO("Old span interior not found");
+    CHECK(map.contains(ip15, &mark));
+  }
+  {
+    INFO("Fill overwrote mark.");
+    CHECK(mark == markA);
+  }
+
+  map.clear();
+  {
+    INFO("Clear failed.");
+    CHECK(map.getCount() == 0);
+  }
+
+  map.mark(ip20, ip50, markA);
+  map.mark(ip100, ip150, markB);
+  map.fill(ip10, ip200, markC);
+  CHECK(map.getCount() == 5);
+  {
+    INFO("Left span missing");
+    CHECK(map.contains(ip15, &mark));
+  }
+  {
+    INFO("Middle span missing");
+    CHECK(map.contains(ip60, &mark));
+  }
+  {
+    INFO("fill mark wrong.");
+    CHECK(mark == markC);
+  }
+  {
+    INFO("right span missing.");
+    CHECK(map.contains(ip160));
+  }
+  {
+    INFO("right span missing");
+    CHECK(map.contains(ip120, &mark));
+  }
+  {
+    INFO("wrong data on right mark span.");
+    CHECK(mark == markB);
+  }
+
+  map.unmark(ip140, ip160);
+  {
+    INFO("unmark failed");
+    CHECK(map.getCount() == 5);
+  }
+  {
+    INFO("unmark left edge still there.");
+    CHECK(!map.contains(ip140));
+  }
+  {
+    INFO("unmark middle still there.");
+    CHECK(!map.contains(ip150));
+  }
+  {
+    INFO("unmark right edge still there.");
+    CHECK(!map.contains(ip160));
+  }
+
+  map.clear();
+  map.mark(ip20, ip20, markA);
+  {
+    INFO("Map failed on singleton insert");
+    CHECK(map.contains(ip20));
+  }
+  map.mark(ip10, ip200, markB);
+  mark = 0;
+  map.contains(ip20, &mark);
+  {
+    INFO("Map held singleton against range.");
+    CHECK(mark == markB);
+  }
+  map.mark(ip100, ip120, markA);
+  map.mark(ip150, ip160, markB);
+  map.mark(ip0, ipmax, markC);
+  {
+    INFO("IpMap: Full range fill left extra ranges.");
+    CHECK(map.getCount() == 1);
+  }
+}
+
+TEST_CASE("IpMap Unmark", "[libts][ipmap]")
+{
+  IpMap map;
+  //  ip_text_buffer ipb1, ipb2;
+  void *const markA = reinterpret_cast<void *>(1);
+
+  IpEndpoint a_0, a_0_0_0_16, a_0_0_0_17, a_max;
+  IpEndpoint a_10_28_56_0, a_10_28_56_4, a_10_28_56_255;
+  IpEndpoint a_10_28_55_255, a_10_28_57_0;
+  IpEndpoint a_63_128_1_12;
+  IpEndpoint a_loopback, a_loopback2;
+  IpEndpoint a6_0, a6_max, a6_fe80_9d90, a6_fe80_9d9d, a6_fe80_9d95;
+
+  ats_ip_pton("0.0.0.0", &a_0);
+  ats_ip_pton("0.0.0.16", &a_0_0_0_16);
+  ats_ip_pton("0.0.0.17", &a_0_0_0_17);
+  ats_ip_pton("255.255.255.255", &a_max);
+  ats_ip_pton("10.28.55.255", &a_10_28_55_255);
+  ats_ip_pton("10.28.56.0", &a_10_28_56_0);
+  ats_ip_pton("10.28.56.4", &a_10_28_56_4);
+  ats_ip_pton("10.28.56.255", &a_10_28_56_255);
+  ats_ip_pton("10.28.57.0", &a_10_28_57_0);
+  ats_ip_pton("63.128.1.12", &a_63_128_1_12);
+  ats_ip_pton("::", &a6_0);
+  ats_ip_pton("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", &a6_max);
+  ats_ip_pton("fe80::221:9bff:fe10:9d90", &a6_fe80_9d90);
+  ats_ip_pton("fe80::221:9bff:fe10:9d9d", &a6_fe80_9d9d);
+  ats_ip_pton("fe80::221:9bff:fe10:9d95", &a6_fe80_9d95);
+  ats_ip_pton("127.0.0.1", &a_loopback);
+  ats_ip_pton("127.0.0.255", &a_loopback2);
+
+  map.mark(&a_0, &a_max, markA);
+  {
+    INFO("IpMap Unmark: Full range not single.");
+    CHECK(map.getCount() == 1);
+  }
+  map.unmark(&a_10_28_56_0, &a_10_28_56_255);
+  {
+    INFO("IpMap Unmark: Range unmark failed.");
+    CHECK(map.getCount() == 2);
+  }
+  // Generic range check.
+  {
+    INFO("IpMap Unmark: Range unmark min address not removed.");
+    CHECK(!map.contains(&a_10_28_56_0));
+  }
+  {
+    INFO("IpMap Unmark: Range unmark max address not removed.");
+    CHECK(!map.contains(&a_10_28_56_255));
+  }
+  {
+    INFO("IpMap Unmark: Range unmark min-1 address removed.");
+    CHECK(map.contains(&a_10_28_55_255));
+  }
+  {
+    INFO("IpMap Unmark: Range unmark max+1 address removed.");
+    CHECK(map.contains(&a_10_28_57_0));
+  }
+  // Test min bounded range.
+  map.unmark(&a_0, &a_0_0_0_16);
+  {
+    INFO("IpMap Unmark: Range unmark zero address not removed.");
+    CHECK(!map.contains(&a_0));
+  }
+  {
+    INFO("IpMap Unmark: Range unmark zero bounded range max not removed.");
+    CHECK(!map.contains(&a_0_0_0_16));
+  }
+  {
+    INFO("IpMap Unmark: Range unmark zero bounded range max+1 removed.");
+    CHECK(map.contains(&a_0_0_0_17));
+  }
+}
+
+TEST_CASE("IpMap Fill", "[libts][ipmap]")
+{
+  IpMap map;
+  void *const allow = reinterpret_cast<void *>(0);
+  void *const deny  = reinterpret_cast<void *>(~0);
+  void *const markA = reinterpret_cast<void *>(1);
+  void *const markB = reinterpret_cast<void *>(2);
+  void *const markC = reinterpret_cast<void *>(3);
+  void *mark; // for retrieval
+
+  IpEndpoint a0, a_10_28_56_0, a_10_28_56_4, a_10_28_56_255, a3, a4;
+  IpEndpoint a_9_255_255_255, a_10_0_0_0, a_10_0_0_19, a_10_0_0_255, a_10_0_1_0;
+  IpEndpoint a_max, a_loopback, a_loopback2;
+  IpEndpoint a_10_28_55_255, a_10_28_57_0;
+  IpEndpoint a_63_128_1_12;
+  IpEndpoint a_0000_0000, a_0000_0001, a_ffff_ffff;
+  IpEndpoint a_fe80_9d8f, a_fe80_9d90, a_fe80_9d95, a_fe80_9d9d, a_fe80_9d9e;
+
+  ats_ip_pton("0.0.0.0", &a0);
+  ats_ip_pton("255.255.255.255", &a_max);
+
+  ats_ip_pton("9.255.255.255", &a_9_255_255_255);
+  ats_ip_pton("10.0.0.0", &a_10_0_0_0);
+  ats_ip_pton("10.0.0.19", &a_10_0_0_19);
+  ats_ip_pton("10.0.0.255", &a_10_0_0_255);
+  ats_ip_pton("10.0.1.0", &a_10_0_1_0);
+
+  ats_ip_pton("10.28.55.255", &a_10_28_55_255);
+  ats_ip_pton("10.28.56.0", &a_10_28_56_0);
+  ats_ip_pton("10.28.56.4", &a_10_28_56_4);
+  ats_ip_pton("10.28.56.255", &a_10_28_56_255);
+  ats_ip_pton("10.28.57.0", &a_10_28_57_0);
+
+  ats_ip_pton("192.168.1.0", &a3);
+  ats_ip_pton("192.168.1.255", &a4);
+
+  ats_ip_pton("::", &a_0000_0000);
+  ats_ip_pton("::1", &a_0000_0001);
+  ats_ip_pton("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", &a_ffff_ffff);
+  ats_ip_pton("fe80::221:9bff:fe10:9d8f", &a_fe80_9d8f);
+  ats_ip_pton("fe80::221:9bff:fe10:9d90", &a_fe80_9d90);
+  ats_ip_pton("fe80::221:9bff:fe10:9d95", &a_fe80_9d95);
+  ats_ip_pton("fe80::221:9bff:fe10:9d9d", &a_fe80_9d9d);
+  ats_ip_pton("fe80::221:9bff:fe10:9d9e", &a_fe80_9d9e);
+
+  ats_ip_pton("127.0.0.0", &a_loopback);
+  ats_ip_pton("127.0.0.255", &a_loopback2);
+  ats_ip_pton("63.128.1.12", &a_63_128_1_12);
+
+  SECTION("subnet overfill")
+  {
+    map.fill(&a_10_28_56_0, &a_10_28_56_255, deny);
+    map.fill(&a0, &a_max, allow);
+    CHECK_THAT(map, IsMarkedWith(a_10_28_56_4, deny));
+  }
+
+  SECTION("singleton overfill")
+  {
+    map.fill(&a_loopback, &a_loopback, allow);
+    {
+      INFO("singleton not marked.");
+      CHECK_THAT(map, IsMarkedAt(a_loopback));
+    }
+    map.fill(&a0, &a_max, deny);
+    THEN("singleton mark")
+    {
+      CHECK_THAT(map, IsMarkedWith(a_loopback, allow));
+      THEN("not empty")
+      {
+        REQUIRE(map.begin() != map.end());
+        IpMap::iterator spot = map.begin();
+        ++spot;
+        THEN("more than one range")
+        {
+          REQUIRE(spot != map.end());
+          THEN("ranges disjoint")
+          {
+            INFO(" " << map.begin()->max() << " < " << spot->min());
+            REQUIRE(-1 == ats_ip_addr_cmp(map.begin()->max(), spot->min()));
+          }
+        }
+      }
+    }
+  }
+
+  SECTION("3")
+  {
+    map.fill(&a_loopback, &a_loopback2, markA);
+    map.fill(&a_10_28_56_0, &a_10_28_56_255, markB);
+    {
+      INFO("over extended range");
+      CHECK_THAT(map, !IsMarkedWith(a_63_128_1_12, markC));
+    }
+    map.fill(&a0, &a_max, markC);
+    {
+      INFO("IpMap[2]: Fill failed.");
+      CHECK(map.getCount() == 5);
+    }
+    {
+      INFO("invalid mark in range gap");
+      CHECK_THAT(map, IsMarkedWith(a_63_128_1_12, markC));
+    }
+  }
+
+  SECTION("4")
+  {
+    map.fill(&a_10_0_0_0, &a_10_0_0_255, allow);
+    map.fill(&a_loopback, &a_loopback2, allow);
+    {
+      INFO("invalid mark between ranges");
+      CHECK_THAT(map, !IsMarkedAt(a_63_128_1_12));
+    }
+    {
+      INFO("invalid mark in lower range");
+      CHECK_THAT(map, IsMarkedWith(a_10_0_0_19, allow));
+    }
+    map.fill(&a0, &a_max, deny);
+    {
+      INFO("range count incorrect");
+      CHECK(map.getCount() == 5);
+    }
+    {
+      INFO("mark between ranges");
+      CHECK_THAT(map, IsMarkedWith(a_63_128_1_12, deny));
+    }
+
+    map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
+    map.fill(&a_0000_0001, &a_0000_0001, markA);
+    map.fill(&a_0000_0000, &a_ffff_ffff, markB);
+
+    {
+      INFO("IpMap Fill[v6]: Zero address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_0000_0000, markB));
+    }
+    {
+      INFO("IpMap Fill[v6]: Max address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_ffff_ffff, markB));
+    }
+    {
+      INFO("IpMap Fill[v6]: 9d90 address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d90, markA));
+    }
+    {
+      INFO("IpMap Fill[v6]: 9d8f address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d8f, markB));
+    }
+    {
+      INFO("IpMap Fill[v6]: 9d9d address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d9d, markA));
+    }
+    {
+      INFO("IpMap Fill[v6]: 9d9b address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d9e, markB));
+    }
+    {
+      INFO("IpMap Fill[v6]: ::1 has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_0000_0001, markA));
+    }
+
+    {
+      INFO("IpMap Fill[pre-refill]: Bad range count.");
+      CHECK(map.getCount() == 10);
+    }
+    // These should be ignored by the map as it is completely covered for IPv6.
+    map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
+    map.fill(&a_0000_0001, &a_0000_0001, markC);
+    map.fill(&a_0000_0000, &a_ffff_ffff, markB);
+    {
+      INFO("IpMap Fill[post-refill]: Bad range count.");
+      CHECK(map.getCount() == 10);
+    }
+  }
+
+  SECTION("5")
+  {
+    map.fill(&a_fe80_9d90, &a_fe80_9d9d, markA);
+    map.fill(&a_0000_0001, &a_0000_0001, markC);
+    map.fill(&a_0000_0000, &a_ffff_ffff, markB);
+    {
+      INFO("IpMap Fill[v6-2]: Zero address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_0000_0000, markB));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: Max address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_ffff_ffff, markB));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: 9d90 address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d90, markA));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: 9d8f address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d8f, markB));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: 9d9d address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d9d, markA));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: 9d9b address has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_fe80_9d9e, markB));
+    }
+    {
+      INFO("IpMap Fill[v6-2]: ::1 has bad mark.");
+      CHECK_THAT(map, IsMarkedWith(a_0000_0001, markC));
+    }
+  }
+}
+
+TEST_CASE("IpMap CloseIntersection", "[libts][ipmap]")
+{
+  IpMap map;
+  void *const markA = reinterpret_cast<void *>(1);
+  void *const markB = reinterpret_cast<void *>(2);
+  void *const markC = reinterpret_cast<void *>(3);
+  void *const markD = reinterpret_cast<void *>(4);
+  // void *mark; // for retrieval
+
+  IpEndpoint a_1_l, a_1_u, a_2_l, a_2_u, a_3_l, a_3_u, a_4_l, a_4_u, a_5_l, a_5_u, a_6_l, a_6_u, a_7_l, a_7_u;
+  IpEndpoint b_1_l, b_1_u;
+  IpEndpoint c_1_l, c_1_u, c_2_l, c_2_u, c_3_l, c_3_u;
+  IpEndpoint d_1_l, d_1_u, d_2_l, d_2_u;
+
+  IpEndpoint a_1_m;
+
+  ats_ip_pton("123.88.172.0", &a_1_l);
+  ats_ip_pton("123.88.180.93", &a_1_m);
+  ats_ip_pton("123.88.191.255", &a_1_u);
+  ats_ip_pton("123.89.132.0", &a_2_l);
+  ats_ip_pton("123.89.135.255", &a_2_u);
+  ats_ip_pton("123.89.160.0", &a_3_l);
+  ats_ip_pton("123.89.167.255", &a_3_u);
+  ats_ip_pton("123.90.108.0", &a_4_l);
+  ats_ip_pton("123.90.111.255", &a_4_u);
+  ats_ip_pton("123.90.152.0", &a_5_l);
+  ats_ip_pton("123.90.159.255", &a_5_u);
+  ats_ip_pton("123.91.0.0", &a_6_l);
+  ats_ip_pton("123.91.35.255", &a_6_u);
+  ats_ip_pton("123.91.40.0", &a_7_l);
+  ats_ip_pton("123.91.47.255", &a_7_u);
+
+  ats_ip_pton("123.78.100.0", &b_1_l);
+  ats_ip_pton("123.78.115.255", &b_1_u);
+
+  ats_ip_pton("123.88.204.0", &c_1_l);
+  ats_ip_pton("123.88.219.255", &c_1_u);
+  ats_ip_pton("123.90.112.0", &c_2_l);
+  ats_ip_pton("123.90.119.255", &c_2_u);
+  ats_ip_pton("123.90.132.0", &c_3_l);
+  ats_ip_pton("123.90.135.255", &c_3_u);
+
+  ats_ip_pton("123.82.196.0", &d_1_l);
+  ats_ip_pton("123.82.199.255", &d_1_u);
+  ats_ip_pton("123.82.204.0", &d_2_l);
+  ats_ip_pton("123.82.219.255", &d_2_u);
+
+  map.mark(a_1_l, a_1_u, markA);
+  map.mark(a_2_l, a_2_u, markA);
+  map.mark(a_3_l, a_3_u, markA);
+  map.mark(a_4_l, a_4_u, markA);
+  map.mark(a_5_l, a_5_u, markA);
+  map.mark(a_6_l, a_6_u, markA);
+  map.mark(a_7_l, a_7_u, markA);
+  CHECK_THAT(map, IsMarkedAt(a_1_m));
+
+  map.mark(b_1_l, b_1_u, markB);
+  CHECK_THAT(map, IsMarkedAt(a_1_m));
+
+  map.mark(c_1_l, c_1_u, markC);
+  map.mark(c_2_l, c_2_u, markC);
+  map.mark(c_3_l, c_3_u, markC);
+  CHECK_THAT(map, IsMarkedAt(a_1_m));
+
+  map.mark(d_1_l, d_1_u, markD);
+  map.mark(d_2_l, d_2_u, markD);
+  CHECK_THAT(map, IsMarkedAt(a_1_m));
+
+  CHECK(map.getCount() == 13);
+}
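
Illustration (not part of the patch): the behavior these cases pin down is that mark()
overwrites whatever is already in its range, while fill() only paints the unmarked gaps and
leaves existing marks intact; the variable names below are arbitrary.

    IpMap acl;
    acl.mark(lo, hi, deny);    // deny replaces any existing mark in [lo, hi]
    acl.fill(min, max, allow); // allow applies only where nothing is marked yet
    void *m;
    if (acl.contains(addr, &m)) {
      // m is deny inside [lo, hi], allow elsewhere in [min, max]
    }
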
diff --git a/lib/ts/unit-tests/test_layout.cc b/lib/ts/unit-tests/test_layout.cc
new file mode 100644
index 0000000..59facae
--- /dev/null
+++ b/lib/ts/unit-tests/test_layout.cc
@@ -0,0 +1,89 @@
+/** @file
+  Test file for layout structure
+  @section license License
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+
+#include "catch.hpp"
+
+#include "ts/I_Layout.h"
+#include "ts/ink_platform.h"
+
+std::string
+append_slash(const char *path)
+{
+  std::string ret(path);
+  if (ret.back() != '/')
+    ret.append("/");
+  return ret;
+}
+
+// test cases: [constructor], [env_constructor], [create], [relative], [relative_to], [update_sysconfdir]
+// ======= test for layout ========
+
+TEST_CASE("constructor test", "[constructor]")
+{
+  Layout layout;
+  // test for constructor
+  REQUIRE(layout.prefix == TS_BUILD_PREFIX);
+  REQUIRE(layout.sysconfdir == layout.relative(TS_BUILD_SYSCONFDIR));
+}
+
+TEST_CASE("environment variable constructor test", "[env_constructor]")
+{
+  std::string newpath = append_slash(TS_BUILD_PREFIX) + "env";
+  setenv("TS_ROOT", newpath.c_str(), true);
+
+  Layout layout;
+  REQUIRE(layout.prefix == newpath);
+  REQUIRE(layout.sysconfdir == layout.relative_to(newpath, TS_BUILD_SYSCONFDIR));
+  unsetenv("TS_ROOT");
+}
+
+TEST_CASE("layout create test", "[create]")
+{
+  Layout::create();
+  REQUIRE(Layout::get()->prefix == TS_BUILD_PREFIX);
+  REQUIRE(Layout::get()->sysconfdir == Layout::get()->relative(TS_BUILD_SYSCONFDIR));
+}
+
+// The tests below rely on the layout created above.
+TEST_CASE("relative test", "[relative]")
+{
+  // relative (1 argument)
+  ts::string_view sv("file");
+  std::string str1 = append_slash(TS_BUILD_PREFIX) + "file";
+  REQUIRE(Layout::get()->relative(sv) == str1);
+}
+
+TEST_CASE("relative to test", "[relative_to]")
+{
+  // relative to (2 parameters)
+  std::string str1 = append_slash(TS_BUILD_PREFIX) + "file";
+  REQUIRE(Layout::relative_to(Layout::get()->prefix, "file") == str1);
+
+  // relative to (4 parameters)
+  char config_file[PATH_NAME_MAX];
+  Layout::relative_to(config_file, sizeof(config_file), Layout::get()->sysconfdir, "records.config");
+  std::string a = Layout::relative_to(Layout::get()->sysconfdir, "records.config");
+  std::string b = config_file;
+  REQUIRE(a == b);
+}
+
+TEST_CASE("update_sysconfdir test", "[update_sysconfdir]")
+{
+  Layout::get()->update_sysconfdir("/abc");
+  REQUIRE(Layout::get()->sysconfdir == "/abc");
+}
diff --git a/lib/ts/unit-tests/string_view.cpp b/lib/ts/unit-tests/test_string_view.cc
similarity index 91%
rename from lib/ts/unit-tests/string_view.cpp
rename to lib/ts/unit-tests/test_string_view.cc
index a49c5a0..2bac5b8 100644
--- a/lib/ts/unit-tests/string_view.cpp
+++ b/lib/ts/unit-tests/test_string_view.cc
@@ -18,10 +18,6 @@
 
 #include "catch.hpp"
 
-#ifndef _DEBUG
-#define _DEBUG
-#endif
-
 #include "string_view.h"
 #include <iostream>
 #include <string>
@@ -44,6 +40,19 @@ TEST_CASE("constructor calls", "[string_view] [constructor]")
     REQUIRE(sv.length() == 5);
     REQUIRE(sv.empty() == false);
     REQUIRE(sv == "hello");
+
+    constexpr ts::string_view a{"evil dave"_sv};
+    REQUIRE(a.size() == 9);
+    REQUIRE(a.length() == 9);
+    REQUIRE(a.empty() == false);
+    REQUIRE(a == "evil dave");
+
+    auto b = "grigor rulz"_sv;
+    REQUIRE((std::is_same<decltype(b), ts::string_view>::value) == true);
+    REQUIRE(b.size() == 11);
+    REQUIRE(b.length() == 11);
+    REQUIRE(b.empty() == false);
+    REQUIRE(b == "grigor rulz");
   }
 
   SECTION("operator =")
@@ -106,11 +115,11 @@ TEST_CASE("constructor calls", "[string_view] [constructor]")
 
   SECTION("= operator")
   {
-    string std_string = "hello";
-    ts::string_view sv = std_string;
-    char str1[10] = "hello";
+    string std_string   = "hello";
+    ts::string_view sv  = std_string;
+    char str1[10]       = "hello";
     ts::string_view sv2 = str1;
-    char const *str2 = "hello";
+    char const *str2    = "hello";
     ts::string_view sv3 = str2;
 
     REQUIRE(sv == "hello");
@@ -122,16 +131,16 @@ TEST_CASE("constructor calls", "[string_view] [constructor]")
 TEST_CASE("operators", "[string_view] [operator]")
 {
   SECTION("==")
- {
+  {
     ts::string_view sv("hello");
 
-    char str1[10] = "hello";
+    char str1[10]    = "hello";
     char const *str2 = "hello";
-    string str3 = "hello";
+    string str3      = "hello";
 
     REQUIRE(str2 == str3);
     REQUIRE(str1 == str3);
-    
+
     REQUIRE(sv == "hello");
     REQUIRE(sv == str1);
     REQUIRE(sv == str2);
@@ -141,9 +150,9 @@ TEST_CASE("operators", "[string_view] [operator]")
   {
     ts::string_view sv("hello");
 
-    char str1[10] = "hhhhhhhhh";
+    char str1[10]    = "hhhhhhhhh";
     char const *str2 = "hella";
-    string str3 = "";
+    string str3      = "";
 
     REQUIRE(str2 != str3);
     REQUIRE(str1 != str3);
@@ -156,9 +165,9 @@ TEST_CASE("operators", "[string_view] [operator]")
   {
     ts::string_view sv("hello");
 
-    char str1[10] = "a";
+    char str1[10]    = "a";
     char const *str2 = "abcdefg";
-    string str3 = "";
+    string str3      = "";
 
     REQUIRE(sv > str1);
     REQUIRE(sv > str2);
@@ -168,9 +177,9 @@ TEST_CASE("operators", "[string_view] [operator]")
   {
     ts::string_view sv("hello");
 
-    char str1[10] = "z";
+    char str1[10]    = "z";
     char const *str2 = "zaaaaaa";
-    string str3 = "hellz";
+    string str3      = "hellz";
 
     REQUIRE(sv < str1);
     REQUIRE(sv < str2);
@@ -180,9 +189,9 @@ TEST_CASE("operators", "[string_view] [operator]")
   {
     ts::string_view sv("hello");
 
-    char str1[10] = "hello";
+    char str1[10]    = "hello";
     char const *str2 = "abcdefg";
-    string str3 = "";
+    string str3      = "";
 
     REQUIRE(sv >= str1);
     REQUIRE(sv >= str2);
@@ -192,9 +201,9 @@ TEST_CASE("operators", "[string_view] [operator]")
   {
     ts::string_view sv("hello");
 
-    char str1[10] = "hello";
+    char str1[10]    = "hello";
     char const *str2 = "zaaaaaa";
-    string str3 = "hellz";
+    string str3      = "hellz";
 
     REQUIRE(sv <= str1);
     REQUIRE(sv <= str2);
@@ -250,8 +259,7 @@ TEST_CASE("Access & iterators", "[string_view] [access]")
     REQUIRE(*sv.crend() == '\0');
 
     int n = 0;
-    for (auto it : sv)
-    {
+    for (auto it : sv) {
       REQUIRE(it == sv[n]);
       n++;
     }
@@ -279,8 +287,13 @@ TEST_CASE("Access & iterators", "[string_view] [access]")
     REQUIRE_THROWS_AS(sv.at(100), std::out_of_range);
     REQUIRE_THROWS_AS(sv.at(-1), std::out_of_range);
 
+#if defined(_DEBUG)
     REQUIRE_THROWS_AS(sv[100], std::out_of_range);
     REQUIRE_THROWS_AS(sv[-1], std::out_of_range);
+#else
+    REQUIRE_NOTHROW(sv[100]);
+    REQUIRE_NOTHROW(sv[-1]);
+#endif
   }
 }
 
@@ -351,7 +364,7 @@ TEST_CASE("Operations", "[string_view] [operation]")
 {
   SECTION("copy")
   {
-    //weird copy
+    // weird copy
 
     // char str[10];
     // ts::string_view sv("hello");
diff --git a/lib/ts/unit-tests/main.cpp b/lib/ts/unit-tests/unit_test_main.cc
similarity index 96%
rename from lib/ts/unit-tests/main.cpp
rename to lib/ts/unit-tests/unit_test_main.cc
index 72537d2..6aed3a6 100644
--- a/lib/ts/unit-tests/main.cpp
+++ b/lib/ts/unit-tests/unit_test_main.cc
@@ -1,5 +1,5 @@
 /** @file
-    
+
   This file used for catch based tests. It is the main() stub.
 
   @section license License
@@ -21,5 +21,5 @@
   limitations under the License.
  */
 
-#define CATCH_CONFIG_MAIN 
+#define CATCH_CONFIG_MAIN
 #include "catch.hpp"
diff --git a/mgmt/Alarms.cc b/mgmt/Alarms.cc
index 6460fb3..d9e8561 100644
--- a/mgmt/Alarms.cc
+++ b/mgmt/Alarms.cc
@@ -72,7 +72,7 @@ alarm_script_dir()
     return path;
   }
 
-  return RecConfigReadBinDir();
+  return ats_stringdup(RecConfigReadBinDir());
 }
 
 Alarms::Alarms()
diff --git a/mgmt/FileManager.cc b/mgmt/FileManager.cc
index 1953ff3..8b4a190 100644
--- a/mgmt/FileManager.cc
+++ b/mgmt/FileManager.cc
@@ -593,7 +593,7 @@ FileManager::WalkSnaps(ExpandingArray *snapList)
   MFresult r;
 
   // Make sure managedDir is the latest from proxy.config.snapshot_dir.
-  this->managedDir = RecConfigReadSnapshotDir();
+  this->managedDir = ats_stringdup(RecConfigReadSnapshotDir());
 
   ink_mutex_acquire(&accessLock);
 
diff --git a/mgmt/LocalManager.cc b/mgmt/LocalManager.cc
index 0fea99b..73285ab 100644
--- a/mgmt/LocalManager.cc
+++ b/mgmt/LocalManager.cc
@@ -150,9 +150,9 @@ LocalManager::processRunning()
 LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on)
 {
   bool found;
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str bindir(RecConfigReadBinDir());
-  ats_scoped_str sysconfdir(RecConfigReadConfigDir());
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string bindir(RecConfigReadBinDir());
+  std::string sysconfdir(RecConfigReadConfigDir());
 
   manager_started_at = time(nullptr);
 
@@ -168,8 +168,8 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on)
   // Get the default IP binding values.
   RecHttpLoadIp("proxy.local.incoming_ip_to_bind", m_inbound_ip4, m_inbound_ip6);
 
-  if (access(sysconfdir, R_OK) == -1) {
-    mgmt_log("[LocalManager::LocalManager] unable to access() directory '%s': %d, %s\n", (const char *)sysconfdir, errno,
+  if (access(sysconfdir.c_str(), R_OK) == -1) {
+    mgmt_log("[LocalManager::LocalManager] unable to access() directory '%s': %d, %s\n", sysconfdir.c_str(), errno,
              strerror(errno));
     mgmt_fatal(0, "[LocalManager::LocalManager] please set the 'TS_ROOT' environment variable\n");
   }
@@ -206,7 +206,7 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on)
   env_prep                     = REC_readString("proxy.config.env_prep", &found);
 
   // Calculate proxy_binary from the absolute bin_path
-  absolute_proxy_binary = Layout::relative_to(bindir, proxy_binary);
+  absolute_proxy_binary = ats_stringdup(Layout::relative_to(bindir, proxy_binary));
 
   // coverity[fs_check_call]
   if (access(absolute_proxy_binary, R_OK | X_OK) == -1) {
@@ -240,8 +240,8 @@ LocalManager::initAlarm()
 void
 LocalManager::initMgmtProcessServer()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str sockpath(Layout::relative_to(rundir, LM_CONNECTION_SERVER));
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string sockpath(Layout::relative_to(rundir, LM_CONNECTION_SERVER));
   mode_t oldmask = umask(0);
 
 #if TS_HAS_WCCP
@@ -251,9 +251,9 @@ LocalManager::initMgmtProcessServer()
   }
 #endif
 
-  process_server_sockfd = bind_unix_domain_socket(sockpath, 00700);
+  process_server_sockfd = bind_unix_domain_socket(sockpath.c_str(), 00700);
   if (process_server_sockfd == -1) {
-    mgmt_fatal(errno, "[LocalManager::initMgmtProcessServer] failed to bind socket at %s\n", (const char *)sockpath);
+    mgmt_fatal(errno, "[LocalManager::initMgmtProcessServer] failed to bind socket at %s\n", sockpath.c_str());
   }
 
   umask(oldmask);
@@ -791,9 +791,9 @@ LocalManager::startProxy(const char *onetime_options)
       int res;
 
       char env_prep_bin[MAXPATHLEN];
-      ats_scoped_str bindir(RecConfigReadBinDir());
+      std::string bindir(RecConfigReadBinDir());
 
-      ink_filepath_make(env_prep_bin, sizeof(env_prep_bin), bindir, env_prep);
+      ink_filepath_make(env_prep_bin, sizeof(env_prep_bin), bindir.c_str(), env_prep);
       res = execl(env_prep_bin, env_prep_bin, (char *)nullptr);
       _exit(res);
     }
diff --git a/mgmt/ProcessManager.cc b/mgmt/ProcessManager.cc
index 663f206..7a249a4 100644
--- a/mgmt/ProcessManager.cc
+++ b/mgmt/ProcessManager.cc
@@ -100,7 +100,7 @@ ProcessManager::start(std::function<void()> const &cb)
 
   ink_release_assert(running == 0);
   ink_atomic_increment(&running, 1);
-  poll_thread = ink_thread_create(processManagerThread, NULL, 0, 0, NULL);
+  poll_thread = ink_thread_create(processManagerThread, nullptr, 0, 0, nullptr);
 }
 
 void
@@ -293,8 +293,8 @@ ProcessManager::processSignalQueue()
 void
 ProcessManager::initLMConnection()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str sockpath(Layout::relative_to(rundir, LM_CONNECTION_SERVER));
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string sockpath(Layout::relative_to(rundir, LM_CONNECTION_SERVER));
 
   MgmtMessageHdr *mh_full;
   int data_len;
@@ -302,11 +302,16 @@ ProcessManager::initLMConnection()
   int servlen;
   struct sockaddr_un serv_addr;
 
+  if (sockpath.length() > sizeof(serv_addr.sun_path) - 1) {
+    errno = ENAMETOOLONG;
+    Fatal("Unable to create socket '%s': %s", sockpath.c_str(), strerror(errno));
+  }
+
   /* Setup Connection to LocalManager */
   memset((char *)&serv_addr, 0, sizeof(serv_addr));
   serv_addr.sun_family = AF_UNIX;
 
-  ink_strlcpy(serv_addr.sun_path, sockpath, sizeof(serv_addr.sun_path));
+  ink_strlcpy(serv_addr.sun_path, sockpath.c_str(), sizeof(serv_addr.sun_path));
 #if defined(darwin) || defined(freebsd)
   servlen = sizeof(sockaddr_un);
 #else
@@ -314,7 +319,7 @@ ProcessManager::initLMConnection()
 #endif
 
   if ((local_manager_sockfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
-    Fatal("Unable to create socket '%s': %s", (const char *)sockpath, strerror(errno));
+    Fatal("Unable to create socket '%s': %s", sockpath.c_str(), strerror(errno));
   }
 
   if (fcntl(local_manager_sockfd, F_SETFD, FD_CLOEXEC) < 0) {
@@ -322,7 +327,7 @@ ProcessManager::initLMConnection()
   }
 
   if ((connect(local_manager_sockfd, (struct sockaddr *)&serv_addr, servlen)) < 0) {
-    Fatal("failed to connect management socket '%s': %s", (const char *)sockpath, strerror(errno));
+    Fatal("failed to connect management socket '%s': %s", sockpath.c_str(), strerror(errno));
   }
 
   data_len          = sizeof(pid_t);
diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc
index f16fd2b..afdef65 100644
--- a/mgmt/RecordsConfig.cc
+++ b/mgmt/RecordsConfig.cc
@@ -429,7 +429,7 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.http.origin_max_connections", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
   ,
-  {RECT_CONFIG, "proxy.config.http.origin_max_connections_queue", RECD_INT, "-1", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
+  {RECT_CONFIG, "proxy.config.http.origin_max_connections_queue", RECD_INT, "-1", RECU_DYNAMIC, RR_NULL, RECC_STR, "^-?[0-9]+$", RECA_NULL}
     ,
   {RECT_CONFIG, "proxy.config.http.origin_min_keep_alive_connections", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
   ,
@@ -560,11 +560,14 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.http.insert_squid_x_forwarded_for", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
+  {RECT_CONFIG, "proxy.config.http.insert_forwarded", RECD_STRING, "none", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
+  ,
   {RECT_CONFIG, "proxy.config.http.insert_age_in_response", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}
   ,
   {RECT_CONFIG, "proxy.config.http.enable_http_stats", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}
   ,
-  {RECT_CONFIG, "proxy.config.http.normalize_ae_gzip", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}
+  // This defaults to a special invalid value so the HTTP transaction handling code can tell that it was not explicitly set.
+  {RECT_CONFIG, "proxy.config.http.normalize_ae", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-2]", RECA_NULL}
   ,
 
   //        ####################################################
@@ -931,6 +934,8 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.dns.dedicated_thread", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_NULL, "[0-1]", RECA_NULL}
   ,
+  {RECT_CONFIG, "proxy.config.dns.connection.mode", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_NULL, "[0-2]", RECA_NULL}
+  ,
   {RECT_CONFIG, "proxy.config.hostdb.ip_resolve", RECD_STRING, nullptr, RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
 
@@ -947,6 +952,8 @@ static const RecordElement RecordsConfig[] =
   //       # in entries, may not be changed while running
   {RECT_CONFIG, "proxy.config.hostdb.max_count", RECD_INT, "-1", RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
+  {RECT_CONFIG, "proxy.config.hostdb.round_robin_max_count", RECD_INT, "16", RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
+  ,
   {RECT_CONFIG, "proxy.config.hostdb.storage_path", RECD_STRING, TS_BUILD_CACHEDIR, RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
   {RECT_CONFIG, "proxy.config.hostdb.max_size", RECD_INT, "10M", RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
@@ -1126,7 +1133,7 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.ssl.client.TLSv1_2", RECD_INT, "1", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}
   ,
-  {RECT_CONFIG, "proxy.config.ssl.server.cipher_suite", RECD_STRING, "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256 [...]
+  {RECT_CONFIG, "proxy.config.ssl.server.cipher_suite", RECD_STRING, "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256 [...]
   ,
   {RECT_CONFIG, "proxy.config.ssl.client.cipher_suite", RECD_STRING, nullptr, RECU_RESTART_TS, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
@@ -1327,6 +1334,8 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.http2.active_timeout_in", RECD_INT, "900", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
   ,
+  {RECT_CONFIG, "proxy.config.http2.push_diary_size", RECD_INT, "256", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
+  ,
 
   //############
   //#
diff --git a/mgmt/Rollback.cc b/mgmt/Rollback.cc
index cd37dce..6203ea1 100644
--- a/mgmt/Rollback.cc
+++ b/mgmt/Rollback.cc
@@ -233,8 +233,8 @@ Rollback::createPathStr(version_t version)
 {
   int bufSize  = 0;
   char *buffer = nullptr;
-  ats_scoped_str sysconfdir(RecConfigReadConfigDir());
-  bufSize = strlen(sysconfdir) + fileNameLen + MAX_VERSION_DIGITS + 1;
+  std::string sysconfdir(RecConfigReadConfigDir());
+  bufSize = sysconfdir.size() + fileNameLen + MAX_VERSION_DIGITS + 1;
   buffer  = (char *)ats_malloc(bufSize);
   Layout::get()->relative_to(buffer, bufSize, sysconfdir, fileName);
   if (version != ACTIVE_VERSION) {
@@ -261,9 +261,8 @@ Rollback::statFile(version_t version, struct stat *buf)
   }
 
   ats_scoped_str filePath(createPathStr(version));
-  ElevateAccess access(root_access_needed ? ElevateAccess::FILE_PRIVILEGE : 0);
 
-  statResult = stat(filePath, buf);
+  statResult = root_access_needed ? elevating_stat(filePath, buf) : stat(filePath, buf);
 
   return statResult;
 }
@@ -279,12 +278,10 @@ Rollback::openFile(version_t version, int oflags, int *errnoPtr)
   int fd;
 
   ats_scoped_str filePath(createPathStr(version));
-  ElevateAccess access(root_access_needed ? ElevateAccess::FILE_PRIVILEGE : 0);
-
   // TODO: Use the original permissions
   //       Anyhow the _1 files should not be created inside Syconfdir.
   //
-  fd = mgmt_open_mode(filePath, oflags, 0644);
+  fd = mgmt_open_mode_elevate(filePath, oflags, 0644, root_access_needed);
 
   if (fd < 0) {
     if (errnoPtr != nullptr) {
diff --git a/mgmt/api/CoreAPIRemote.cc b/mgmt/api/CoreAPIRemote.cc
index 73b26ce..1f012d5 100644
--- a/mgmt/api/CoreAPIRemote.cc
+++ b/mgmt/api/CoreAPIRemote.cc
@@ -196,7 +196,7 @@ Init(const char *socket_path, TSInitOptionT options)
   // libraries. The caller has to pass down the right socket path :(
   if (!socket_path) {
     Layout::create();
-    socket_path = Layout::get()->runtimedir;
+    socket_path = Layout::get()->runtimedir.c_str();
   }
 
   // store socket_path
diff --git a/mgmt/api/NetworkUtilsRemote.cc b/mgmt/api/NetworkUtilsRemote.cc
index 740ae46..ca5914d 100644
--- a/mgmt/api/NetworkUtilsRemote.cc
+++ b/mgmt/api/NetworkUtilsRemote.cc
@@ -57,8 +57,8 @@ set_socket_paths(const char *path)
   // construct paths based on user input
   // form by replacing "mgmtapi.sock" with "eventapi.sock"
   if (path) {
-    main_socket_path  = Layout::relative_to(path, MGMTAPI_MGMT_SOCKET_NAME);
-    event_socket_path = Layout::relative_to(path, MGMTAPI_EVENT_SOCKET_NAME);
+    main_socket_path  = ats_stringdup(Layout::relative_to(path, MGMTAPI_MGMT_SOCKET_NAME));
+    event_socket_path = ats_stringdup(Layout::relative_to(path, MGMTAPI_EVENT_SOCKET_NAME));
   } else {
     main_socket_path  = nullptr;
     event_socket_path = nullptr;
@@ -114,7 +114,14 @@ ts_connect()
   if (!main_socket_path || !event_socket_path) {
     goto ERROR;
   }
-
+  // make sure the length of main_socket_path does not exceed sizeof(sun_path)
+  if (strlen(main_socket_path) > sizeof(client_sock.sun_path) - 1) {
+    goto ERROR;
+  }
+  // make sure the length of event_socket_path does not exceed sizeof(sun_path)
+  if (strlen(event_socket_path) > sizeof(client_event_sock.sun_path) - 1) {
+    goto ERROR;
+  }
   // create a socket
   main_socket_fd = socket(AF_UNIX, SOCK_STREAM, 0);
   if (main_socket_fd < 0) {
@@ -146,7 +153,7 @@ ts_connect()
   // setup Unix domain socket
   memset(&client_event_sock, 0, sizeof(sockaddr_un));
   client_event_sock.sun_family = AF_UNIX;
-  ink_strlcpy(client_event_sock.sun_path, event_socket_path, sizeof(client_sock.sun_path));
+  ink_strlcpy(client_event_sock.sun_path, event_socket_path, sizeof(client_event_sock.sun_path));
 #if defined(darwin) || defined(freebsd)
   sockaddr_len = sizeof(sockaddr_un);
 #else
diff --git a/mgmt/utils/MgmtSocket.cc b/mgmt/utils/MgmtSocket.cc
index 6a51538..9c372a8 100644
--- a/mgmt/utils/MgmtSocket.cc
+++ b/mgmt/utils/MgmtSocket.cc
@@ -23,6 +23,7 @@
 
 #include "ts/ink_platform.h"
 #include "ts/ink_assert.h"
+#include "ts/ink_cap.h"
 #include "MgmtSocket.h"
 
 #if HAVE_UCRED_H
@@ -154,6 +155,25 @@ mgmt_open_mode(const char *path, int oflag, mode_t mode)
 }
 
 //-------------------------------------------------------------------------
+// mgmt_open_mode_elevate
+//-------------------------------------------------------------------------
+
+int
+mgmt_open_mode_elevate(const char *path, int oflag, mode_t mode, bool elevate_p)
+{
+  int r, retries;
+  for (retries = 0; retries < MGMT_MAX_TRANSIENT_ERRORS; retries++) {
+    r = elevate_p ? elevating_open(path, oflag, mode) : ::open(path, oflag, mode);
+    if (r >= 0) {
+      return r;
+    }
+    if (!mgmt_transient_error()) {
+      break;
+    }
+  }
+  return r;
+}
+//-------------------------------------------------------------------------
 // mgmt_select
 //-------------------------------------------------------------------------
 
diff --git a/mgmt/utils/MgmtSocket.h b/mgmt/utils/MgmtSocket.h
index ab10027..a5c7464 100644
--- a/mgmt/utils/MgmtSocket.h
+++ b/mgmt/utils/MgmtSocket.h
@@ -61,6 +61,12 @@ int mgmt_open(const char *path, int oflag);
 int mgmt_open_mode(const char *path, int oflag, mode_t mode);
 
 //-------------------------------------------------------------------------
+// mgmt_open_mode_elevate
+//-------------------------------------------------------------------------
+
+int mgmt_open_mode_elevate(const char *path, int oflag, mode_t mode, bool elevate_p = false);
+
+//-------------------------------------------------------------------------
 // mgmt_select
 //-------------------------------------------------------------------------
 
diff --git a/plugins/esi/README.combo b/plugins/esi/README.combo
index 97b7b45..e4466ab 100644
--- a/plugins/esi/README.combo
+++ b/plugins/esi/README.combo
@@ -18,6 +18,13 @@ The arguments in the plugin.config line in order represent
 A "-" can be supplied as a value for any of these arguments to request
 default value be applied.
 
+Optional arguments:
+
+  --max-files N
+
+    If present in the plugin.config args, this sets the maximum number of
+    files to process in a request to N. This cannot be changed per remap.
+
 Also, just like the original combohandler, this plugin generates URLs
 of the form 'http://localhost/<dir>/<file-path>'. <dir> here defaults
 to "Host" header unless specified by the file path in the query parameter using
diff --git a/plugins/esi/combo_handler.cc b/plugins/esi/combo_handler.cc
index 64d4e85..3f2e433 100644
--- a/plugins/esi/combo_handler.cc
+++ b/plugins/esi/combo_handler.cc
@@ -30,6 +30,7 @@
 #include <pthread.h>
 #include <arpa/inet.h>
 #include <limits>
+#include <getopt.h>
 
 #include "ts/ts.h"
 #include "ts/experimental.h"
@@ -46,8 +47,18 @@ using namespace EsiLib;
 #define DEBUG_TAG "combo_handler"
 #define FEAT_GATE_8_0
 
-#define MAX_FILE_COUNT 30
-#define MAX_QUERY_LENGTH 3000
+// Converting std::string sizes to the int lengths expected by the C-style interfaces leads to ugly casting; do the cast in one place.
+inline int
+length(std::string const &str)
+{
+  return static_cast<int>(str.size());
+}
+
+constexpr unsigned DEFAULT_MAX_FILE_COUNT = 100;
+constexpr int MAX_QUERY_LENGTH            = 4096;
+
+unsigned MaxFileCount = DEFAULT_MAX_FILE_COUNT;
+
 // We hardcode "immutable" here because it's not yet defined in the ATS API
 #define HTTP_IMMUTABLE "immutable"
 
@@ -56,8 +67,7 @@ static string SIG_KEY_NAME;
 static vector<string> HEADER_WHITELIST;
 
 #define DEFAULT_COMBO_HANDLER_PATH "admin/v1/combo"
-static string COMBO_HANDLER_PATH;
-static int COMBO_HANDLER_PATH_SIZE;
+static string COMBO_HANDLER_PATH{DEFAULT_COMBO_HANDLER_PATH};
 
 #define LOG_ERROR(fmt, args...)                                                               \
   do {                                                                                        \
@@ -323,8 +333,38 @@ TSPluginInit(int argc, const char *argv[])
     return;
   }
 
-  if ((argc > 1) && (strcmp(argv[1], "-") != 0)) {
-    COMBO_HANDLER_PATH = argv[1];
+  if (argc > 1) {
+    int c;
+    static const struct option longopts[] = {
+      {"max-files", required_argument, nullptr, 'f'}, {nullptr, 0, nullptr, 0},
+    };
+
+    int longindex = 0;
+    optind        = 1; // Force restart to avoid problems with other plugins.
+    while ((c = getopt_long(argc, const_cast<char *const *>(argv), "f:", longopts, &longindex)) != -1) {
+      switch (c) {
+      case 'f': {
+        char *tmp = nullptr;
+        long n    = strtol(optarg, &tmp, 0);
+        if (tmp == optarg) {
+          TSError("[%s] %s requires a numeric argument", DEBUG_TAG, longopts[longindex].name);
+        } else if (n < 1) {
+          TSError("[%s] %s must be a positive number", DEBUG_TAG, longopts[longindex].name);
+        } else {
+          MaxFileCount = n;
+          TSDebug(DEBUG_TAG, "Max files set to %u", MaxFileCount);
+        }
+        break;
+      }
+      default:
+        TSError("[%s] Unrecognized option '%s'", DEBUG_TAG, argv[optind - 1]);
+        break;
+      }
+    }
+  }
+
+  if (argc > optind && (argv[optind][0] != '-' || argv[optind][1])) {
+    COMBO_HANDLER_PATH = argv[optind];
     if (COMBO_HANDLER_PATH == "/") {
       COMBO_HANDLER_PATH.clear();
     } else {
@@ -335,22 +375,22 @@ TSPluginInit(int argc, const char *argv[])
         COMBO_HANDLER_PATH.erase(COMBO_HANDLER_PATH.size() - 1, 1);
       }
     }
-  } else {
-    COMBO_HANDLER_PATH = DEFAULT_COMBO_HANDLER_PATH;
   }
-  COMBO_HANDLER_PATH_SIZE = static_cast<int>(COMBO_HANDLER_PATH.size());
-  LOG_DEBUG("Combo handler path is [%s]", COMBO_HANDLER_PATH.c_str());
+  ++optind;
+  LOG_DEBUG("Combo handler path is [%.*s]", length(COMBO_HANDLER_PATH), COMBO_HANDLER_PATH.data());
 
-  SIG_KEY_NAME = ((argc > 2) && (strcmp(argv[2], "-") != 0)) ? argv[2] : "";
-  LOG_DEBUG("Signature key is [%s]", SIG_KEY_NAME.c_str());
+  SIG_KEY_NAME = (argc > optind && (argv[optind][0] != '-' || argv[optind][1])) ? argv[optind] : "";
+  ++optind;
+  LOG_DEBUG("Signature key is [%.*s]", length(SIG_KEY_NAME), SIG_KEY_NAME.data());
 
-  if ((argc > 3) && (strcmp(argv[3], "-") != 0)) {
-    stringstream strstream(argv[3]);
+  if (argc > optind && (argv[optind][0] != '-' || argv[optind][1])) {
+    stringstream strstream(argv[optind++]);
     string header;
     while (getline(strstream, header, ':')) {
       HEADER_WHITELIST.push_back(header);
     }
   }
+  ++optind;
 
   for (unsigned int i = 0; i < HEADER_WHITELIST.size(); i++) {
     LOG_DEBUG("WhiteList: %s", HEADER_WHITELIST[i].c_str());
@@ -370,7 +410,7 @@ TSPluginInit(int argc, const char *argv[])
     LOG_ERROR("failed to reserve private data slot");
     return;
   } else {
-    LOG_DEBUG("arg_idx: %d", arg_idx);
+    LOG_DEBUG("txn_arg_idx: %d", arg_idx);
   }
 
   Utils::init(&TSDebug, &TSError);
@@ -465,8 +505,8 @@ isComboHandlerRequest(TSMBuffer bufp, TSMLoc hdr_loc, TSMLoc url_loc)
         LOG_ERROR("Could not get path from request URL");
         retval = false;
       } else {
-        retval =
-          (path_len == COMBO_HANDLER_PATH_SIZE) && (strncasecmp(path, COMBO_HANDLER_PATH.c_str(), COMBO_HANDLER_PATH_SIZE) == 0);
+        retval = (path_len == length(COMBO_HANDLER_PATH)) &&
+                 (strncasecmp(path, COMBO_HANDLER_PATH.data(), COMBO_HANDLER_PATH.size()) == 0);
         LOG_DEBUG("Path [%.*s] is %s combo handler path", path_len, path, (retval ? "a" : "not a"));
       }
     }
@@ -648,7 +688,7 @@ parseQueryParameters(const char *query, int query_len, ClientRequest &creq)
     creq.file_urls.clear();
   }
 
-  if (creq.file_urls.size() > MAX_FILE_COUNT) {
+  if (creq.file_urls.size() > MaxFileCount) {
     creq.status = TS_HTTP_STATUS_BAD_REQUEST;
     LOG_ERROR("too many files in url");
     creq.file_urls.clear();
diff --git a/plugins/esi/lib/HandlerManager.h b/plugins/esi/lib/HandlerManager.h
index f968309..ca076eb 100644
--- a/plugins/esi/lib/HandlerManager.h
+++ b/plugins/esi/lib/HandlerManager.h
@@ -55,7 +55,7 @@ private:
   struct ModuleHandles {
     void *object;
     SpecialIncludeHandlerCreator function;
-    ModuleHandles(void *o = 0, SpecialIncludeHandlerCreator f = 0) : object(o), function(f){};
+    ModuleHandles(void *o = nullptr, SpecialIncludeHandlerCreator f = 0) : object(o), function(f){};
   };
 
   typedef std::map<std::string, ModuleHandles> ModuleHandleMap;
diff --git a/plugins/experimental/cachekey/cachekey.cc b/plugins/experimental/cachekey/cachekey.cc
index 8a6d62b..2a0b140 100644
--- a/plugins/experimental/cachekey/cachekey.cc
+++ b/plugins/experimental/cachekey/cachekey.cc
@@ -179,7 +179,8 @@ classifyUserAgent(const Classifier &c, TSMBuffer buf, TSMLoc hdrs, String &class
  * @param url URI handle
  * @param hdrs headers handle
  */
-CacheKey::CacheKey(TSHttpTxn txn, TSMBuffer buf, TSMLoc url, TSMLoc hdrs) : _txn(txn), _buf(buf), _url(url), _hdrs(hdrs)
+CacheKey::CacheKey(TSHttpTxn txn, TSMBuffer buf, TSMLoc url, TSMLoc hdrs, String separator)
+  : _txn(txn), _buf(buf), _url(url), _hdrs(hdrs), _separator(separator)
 {
   _key.reserve(512);
 }
@@ -191,7 +192,7 @@ CacheKey::CacheKey(TSHttpTxn txn, TSMBuffer buf, TSMLoc url, TSMLoc hdrs) : _txn
 void
 CacheKey::append(unsigned n)
 {
-  _key.append("/");
+  _key.append(_separator);
   ::append(_key, n);
 }
 
@@ -202,7 +203,7 @@ CacheKey::append(unsigned n)
 void
 CacheKey::append(const String &s)
 {
-  _key.append("/");
+  _key.append(_separator);
   ::appendEncoded(_key, s.data(), s.size());
 }
 
@@ -213,7 +214,7 @@ CacheKey::append(const String &s)
 void
 CacheKey::append(const char *s)
 {
-  _key.append("/");
+  _key.append(_separator);
   ::appendEncoded(_key, s, strlen(s));
 }
 
@@ -225,7 +226,7 @@ CacheKey::append(const char *s)
 void
 CacheKey::append(const char *s, unsigned n)
 {
-  _key.append("/");
+  _key.append(_separator);
   ::appendEncoded(_key, s, n);
 }
 
@@ -417,7 +418,7 @@ CacheKey::appendHeaders(const ConfigHeaders &config)
   }
 
   /* It doesn't make sense to have the headers unordered in the cache key. */
-  String headers_key = containerToString<StringSet, StringSet::const_iterator>(hset, "", "/");
+  String headers_key = containerToString<StringSet, StringSet::const_iterator>(hset, "", _separator);
   if (!headers_key.empty()) {
     append(headers_key);
   }
diff --git a/plugins/experimental/cachekey/cachekey.h b/plugins/experimental/cachekey/cachekey.h
index 61ea097..fb2337a 100644
--- a/plugins/experimental/cachekey/cachekey.h
+++ b/plugins/experimental/cachekey/cachekey.h
@@ -49,7 +49,7 @@
 class CacheKey
 {
 public:
-  CacheKey(TSHttpTxn txn, TSMBuffer buf, TSMLoc url, TSMLoc hdrs);
+  CacheKey(TSHttpTxn txn, TSMBuffer buf, TSMLoc url, TSMLoc hdrs, String separator);
 
   void append(unsigned number);
   void append(const String &);
@@ -77,7 +77,8 @@ private:
   TSMLoc _url;    /**< @brief URI handle */
   TSMLoc _hdrs;   /**< @brief headers handle */
 
-  String _key; /**< @brief cache key */
+  String _key;       /**< @brief cache key */
+  String _separator; /**< @brief a separator used to separate the cache key elements extracted from the URI */
 };
 
 #endif /* PLUGINS_EXPERIMENTAL_CACHEKEY_CACHEKEY_H_ */
diff --git a/plugins/experimental/cachekey/configs.cc b/plugins/experimental/cachekey/configs.cc
index b5fb65a..788b56d 100644
--- a/plugins/experimental/cachekey/configs.cc
+++ b/plugins/experimental/cachekey/configs.cc
@@ -43,7 +43,7 @@ commaSeparateString(ContainerType &c, const String &input)
 static bool
 isTrue(const char *arg)
 {
-  return (0 == strncasecmp("true", arg, 4) || 0 == strncasecmp("1", arg, 1) || 0 == strncasecmp("yes", arg, 3));
+  return (nullptr == arg || 0 == strncasecmp("true", arg, 4) || 0 == strncasecmp("1", arg, 1) || 0 == strncasecmp("yes", arg, 3));
 }
 
 void
@@ -341,6 +341,9 @@ Configs::init(int argc, char *argv[])
     {const_cast<char *>("capture-prefix-uri"), optional_argument, nullptr, 'n'},
     {const_cast<char *>("capture-path"), optional_argument, nullptr, 'o'},
     {const_cast<char *>("capture-path-uri"), optional_argument, nullptr, 'p'},
+    {const_cast<char *>("remove-prefix"), optional_argument, nullptr, 'q'},
+    {const_cast<char *>("remove-path"), optional_argument, nullptr, 'r'},
+    {const_cast<char *>("separator"), optional_argument, nullptr, 's'},
     {nullptr, 0, nullptr, 0},
   };
 
@@ -430,6 +433,15 @@ Configs::init(int argc, char *argv[])
         status = false;
       }
       break;
+    case 'q': /* remove-prefix */
+      _prefixToBeRemoved = isTrue(optarg);
+      break;
+    case 'r': /* remove-path */
+      _pathToBeRemoved = isTrue(optarg);
+      break;
+    case 's': /* separator */
+      setSeparator(optarg);
+      break;
     }
   }
 
@@ -448,3 +460,29 @@ Configs::finalize()
 {
   return _query.finalize() && _headers.finalize() && _cookies.finalize();
 }
+
+bool
+Configs::prefixToBeRemoved()
+{
+  return _prefixToBeRemoved;
+}
+
+bool
+Configs::pathToBeRemoved()
+{
+  return _pathToBeRemoved;
+}
+
+void
+Configs::setSeparator(const char *arg)
+{
+  if (nullptr != arg) {
+    _separator.assign(arg);
+  }
+}
+
+const String &
+Configs::getSeparator()
+{
+  return _separator;
+}
diff --git a/plugins/experimental/cachekey/configs.h b/plugins/experimental/cachekey/configs.h
index f1376fe..9bf71cb 100644
--- a/plugins/experimental/cachekey/configs.h
+++ b/plugins/experimental/cachekey/configs.h
@@ -122,7 +122,7 @@ private:
 class Configs
 {
 public:
-  Configs() {}
+  Configs() : _prefixToBeRemoved(false), _pathToBeRemoved(false), _separator("/") {}
   /**
    * @brief initializes plugin configuration.
    * @param argc number of plugin parameters
@@ -137,6 +137,26 @@ public:
    */
   bool finalize();
 
+  /**
+   * @brief Tells the caller if the prefix is to be removed (not processed at all).
+   */
+  bool prefixToBeRemoved();
+
+  /**
+   * @brief Tells the caller if the path is to be removed (not processed at all).
+   */
+  bool pathToBeRemoved();
+
+  /**
+   * @brief set the cache key elements separator string.
+   */
+  void setSeparator(const char *arg);
+
+  /**
+   * @brief get the cache key elements separator string.
+   */
+  const String &getSeparator();
+
   /* Make the following members public to avoid unnecessary accessors */
   ConfigQuery _query;        /**< @brief query parameter related configuration */
   ConfigHeaders _headers;    /**< @brief headers related configuration */
@@ -157,6 +177,10 @@ private:
    * @return true if successful, false otherwise.
    */
   bool loadClassifiers(const String &args, bool blacklist = true);
+
+  bool _prefixToBeRemoved; /**< @brief instructs that the prefix (i.e. host:port) is not to be added to the cache key */
+  bool _pathToBeRemoved;   /**< @brief instructs that the path is not to be added to the cache key */
+  String _separator;       /**< @brief a separator used to separate the cache key elements extracted from the URI */
 };
 
 #endif // PLUGINS_EXPERIMENTAL_CACHEKEY_CONFIGS_H_
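
The three new options above are ordinary long options of the cachekey remap plugin, so they are supplied per remap rule. A hypothetical remap.config entry exercising them could look like the following (hostnames are placeholders; the option names are taken verbatim from the long-option table added in configs.cc):

    map http://www.example.com http://origin.example.com @plugin=cachekey.so @pparam=--remove-prefix=true @pparam=--remove-path=true @pparam=--separator=_

Note that isTrue() above now also accepts a missing value, so @pparam=--remove-prefix with no "=true" would enable the option as well.
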
diff --git a/plugins/experimental/cachekey/pattern.cc b/plugins/experimental/cachekey/pattern.cc
index 35ef0dd..40dfae7 100644
--- a/plugins/experimental/cachekey/pattern.cc
+++ b/plugins/experimental/cachekey/pattern.cc
@@ -38,7 +38,7 @@ replaceString(String &str, const String &from, const String &to)
   }
 }
 
-Pattern::Pattern() : _re(nullptr), _extra(nullptr), _pattern(""), _replacement(""), _tokenCount(0)
+Pattern::Pattern() : _re(nullptr), _extra(nullptr), _pattern(""), _replacement(""), _replace(false), _tokenCount(0)
 {
 }
 
@@ -49,12 +49,13 @@ Pattern::Pattern() : _re(nullptr), _extra(nullptr), _pattern(""), _replacement("
  * @return true if successful, false if failure
  */
 bool
-Pattern::init(const String &pattern, const String &replacenemt)
+Pattern::init(const String &pattern, const String &replacenemt, bool replace)
 {
   pcreFree();
 
   _pattern.assign(pattern);
   _replacement.assign(replacenemt);
+  _replace = replace;
 
   _tokenCount = 0;
 
@@ -115,9 +116,9 @@ Pattern::init(const String &config)
     ::replaceString(pattern, "\\/", "/");
     ::replaceString(replacement, "\\/", "/");
 
-    return this->init(pattern, replacement);
+    return this->init(pattern, replacement, /* replace */ true);
   } else {
-    return this->init(config, "");
+    return this->init(config, /* replacement */ "", /*replace */ false);
   }
 
   /* Should never get here. */
@@ -170,7 +171,7 @@ Pattern::~Pattern()
 bool
 Pattern::process(const String &subject, StringVector &result)
 {
-  if (!_replacement.empty()) {
+  if (_replace) {
     /* Replacement pattern was provided in the configuration - capture and replace. */
     String element;
     if (replace(subject, element)) {
@@ -235,9 +236,10 @@ Pattern::capture(const String &subject, StringVector &result)
   int matchCount;
   int ovector[OVECOUNT];
 
-  CacheKeyDebug("matching '%s' to '%s'", _pattern.c_str(), subject.c_str());
+  CacheKeyDebug("capturing '%s' from '%s'", _pattern.c_str(), subject.c_str());
 
   if (!_re) {
+    CacheKeyError("regular expression not initialized");
     return false;
   }
 
@@ -274,9 +276,10 @@ Pattern::replace(const String &subject, String &result)
   int matchCount;
   int ovector[OVECOUNT];
 
-  CacheKeyDebug("matching '%s' to '%s'", _pattern.c_str(), subject.c_str());
+  CacheKeyDebug("replacing:'%s' in pattern:'%s', subject:'%s'", _replacement.c_str(), _pattern.c_str(), subject.c_str());
 
-  if (!_re) {
+  if (!_re || !_replace) {
+    CacheKeyError("regular expression not initialized or not configured to replace");
     return false;
   }
 
@@ -330,7 +333,8 @@ Pattern::compile()
   const char *errPtr; /* PCRE error */
   int errOffset;      /* PCRE error offset */
 
-  CacheKeyDebug("compiling pattern:'%s', replacement:'%s'", _pattern.c_str(), _replacement.c_str());
+  CacheKeyDebug("compiling pattern:'%s', replace: %s, replacement:'%s'", _pattern.c_str(), _replace ? "true" : "false",
+                _replacement.c_str());
 
   _re = pcre_compile(_pattern.c_str(), /* the pattern */
                      0,                /* options */
@@ -354,7 +358,7 @@ Pattern::compile()
     return false;
   }
 
-  if (_replacement.empty()) {
+  if (!_replace) {
     /* No replacement necessary - we are done. */
     return true;
   }
diff --git a/plugins/experimental/cachekey/pattern.h b/plugins/experimental/cachekey/pattern.h
index 0ddd383..1448862 100644
--- a/plugins/experimental/cachekey/pattern.h
+++ b/plugins/experimental/cachekey/pattern.h
@@ -46,7 +46,7 @@ public:
   Pattern();
   virtual ~Pattern();
 
-  bool init(const String &pattern, const String &replacenemt);
+  bool init(const String &pattern, const String &replacenemt, bool replace);
   bool init(const String &config);
   bool empty() const;
   bool match(const String &subject);
@@ -64,6 +64,9 @@ private:
   String _pattern;     /**< @brief PCRE pattern string, containing PCRE patterns and capturing groups. */
   String _replacement; /**< @brief PCRE replacement string, containing $0..$9 to be replaced with content of the capturing groups */
 
+  bool _replace; /**< @brief true if a replacement is needed, false if not; distinguishes an empty replacement
+                    string from the case where no replacement is configured at all */
+
   int _tokenCount;              /**< @brief number of replacements $0..$9 found in the replacement string if not empty */
   int _tokens[TOKENCOUNT];      /**< @brief replacement index 0..9, since they can be used in the replacement string in any order */
   int _tokenOffset[TOKENCOUNT]; /**< @brief replacement offset inside the replacement string */
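
The _replace flag documented above exists because an empty replacement string is now a legal configuration, so "no replacement" can no longer be inferred from _replacement.empty(). A minimal usage sketch, assuming the "/pattern/replacement/" capture-and-replace syntax that Pattern::init(const String &config) parses:

    Pattern p;
    p.init("(/[^?]*)");          /* plain pattern: capture only, _replace stays false          */
    p.init("/(abc)/$1-suffix/"); /* pattern plus replacement: _replace becomes true            */
    p.init("/secret=[^&]*//");   /* empty replacement: strip the match; previously this case   */
                                 /* was indistinguishable from "no replacement configured"     */
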
diff --git a/plugins/experimental/cachekey/plugin.cc b/plugins/experimental/cachekey/plugin.cc
index 2378264..d413bd3 100644
--- a/plugins/experimental/cachekey/plugin.cc
+++ b/plugins/experimental/cachekey/plugin.cc
@@ -92,11 +92,12 @@ TSRemapDoRemap(void *instance, TSHttpTxn txn, TSRemapRequestInfo *rri)
 
   if (nullptr != config) {
     /* Initial cache key facility from the requested URL. */
-    CacheKey cachekey(txn, rri->requestBufp, rri->requestUrl, rri->requestHdrp);
+    CacheKey cachekey(txn, rri->requestBufp, rri->requestUrl, rri->requestHdrp, config->getSeparator());
 
     /* Append custom prefix or the host:port */
-    cachekey.appendPrefix(config->_prefix, config->_prefixCapture, config->_prefixCaptureUri);
-
+    if (!config->prefixToBeRemoved()) {
+      cachekey.appendPrefix(config->_prefix, config->_prefixCapture, config->_prefixCaptureUri);
+    }
     /* Classify User-Agent and append the class name to the cache key if matched. */
     cachekey.appendUaClass(config->_classifier);
 
@@ -110,8 +111,9 @@ TSRemapDoRemap(void *instance, TSHttpTxn txn, TSRemapRequestInfo *rri)
     cachekey.appendCookies(config->_cookies);
 
     /* Append the path to the cache key. */
-    cachekey.appendPath(config->_pathCapture, config->_pathCaptureUri);
-
+    if (!config->pathToBeRemoved()) {
+      cachekey.appendPath(config->_pathCapture, config->_pathCaptureUri);
+    }
     /* Append query parameters to the cache key. */
     cachekey.appendQuery(config->_query);
 
diff --git a/plugins/experimental/header_normalize/header_normalize.cc b/plugins/experimental/header_normalize/header_normalize.cc
index c4d9e6b..fc99f89 100644
--- a/plugins/experimental/header_normalize/header_normalize.cc
+++ b/plugins/experimental/header_normalize/header_normalize.cc
@@ -135,6 +135,7 @@ buildHdrMap()
   hdrMap["xref"]                      = "Xref";
   hdrMap["x-id"]                      = "X-ID";
   hdrMap["x-forwarded-for"]           = "X-Forwarded-For";
+  hdrMap["forwarded"]                 = "Forwarded";
   hdrMap["sec-websocket-key"]         = "Sec-WebSocket-Key";
   hdrMap["sec-websocket-version"]     = "Sec-WebSocket-Version";
 }
diff --git a/plugins/experimental/metalink/metalink.cc b/plugins/experimental/metalink/metalink.cc
index 052e1bf..95ccfd0 100644
--- a/plugins/experimental/metalink/metalink.cc
+++ b/plugins/experimental/metalink/metalink.cc
@@ -464,8 +464,7 @@ vconn_write_ready(TSCont contp, void * /* edata ATS_UNUSED */)
 
     /* Can't reuse the TSTransformCreate() continuation because we
      * don't know whether to destroy it in
-     * cache_open_write()/cache_open_write_failed() or
-     * transform_vconn_write_complete() */
+     * cache_open_write()/cache_open_write_failed() */
     contp = TSContCreate(write_handler, nullptr);
     TSContDataSet(contp, write_data);
 
@@ -476,18 +475,6 @@ vconn_write_ready(TSCont contp, void * /* edata ATS_UNUSED */)
   return 0;
 }
 
-static int
-transform_vconn_write_complete(TSCont contp, void * /* edata ATS_UNUSED */)
-{
-  TransformData *data = (TransformData *)TSContDataGet(contp);
-  TSContDestroy(contp);
-
-  TSIOBufferDestroy(data->output_bufp);
-  TSfree(data);
-
-  return 0;
-}
-
 /* TSTransformCreate() handler: Compute the SHA-256 digest of the
  * content */
 
@@ -500,8 +487,8 @@ transform_handler(TSCont contp, TSEvent event, void *edata)
     return vconn_write_ready(contp, edata);
 
   case TS_EVENT_VCONN_WRITE_COMPLETE:
-    return transform_vconn_write_complete(contp, edata);
-
+    TSVConnShutdown(TSTransformOutputVConnGet(contp), 0, 1);
+    break;
   default:
     TSAssert(!"Unexpected event");
   }
diff --git a/plugins/experimental/ts_lua/ts_lua.c b/plugins/experimental/ts_lua/ts_lua.c
index c1f053a..59cfb9e 100644
--- a/plugins/experimental/ts_lua/ts_lua.c
+++ b/plugins/experimental/ts_lua/ts_lua.c
@@ -90,7 +90,8 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_s
   }
 
   if (states > TS_LUA_MAX_STATE_COUNT || states < 1) {
-    snprintf(errbuf, errbuf_size, "[TSRemapNewInstance] - invalid state in option input");
+    snprintf(errbuf, errbuf_size, "[TSRemapNewInstance] - invalid state in option input. Must be between 1 and %d",
+             TS_LUA_MAX_STATE_COUNT);
     return TS_ERROR;
   }
 
@@ -336,10 +337,6 @@ globalHookHandler(TSCont contp, TSEvent event ATS_UNUSED, void *edata)
     lua_getglobal(l, TS_LUA_FUNCTION_G_POST_REMAP);
     break;
 
-  case TS_EVENT_HTTP_SELECT_ALT:
-    lua_getglobal(l, TS_LUA_FUNCTION_G_SELECT_ALT);
-    break;
-
   case TS_EVENT_HTTP_OS_DNS:
     lua_getglobal(l, TS_LUA_FUNCTION_G_OS_DNS);
     break;
@@ -444,7 +441,7 @@ TSPluginInit(int argc, const char *argv[])
   }
 
   if (states > TS_LUA_MAX_STATE_COUNT || states < 1) {
-    TSError("[ts_lua][%s] invalid # of states from option input", __FUNCTION__);
+    TSError("[ts_lua][%s] invalid # of states from option input. Must be between 1 and %d", __FUNCTION__, TS_LUA_MAX_STATE_COUNT);
     return;
   }
 
@@ -549,13 +546,6 @@ TSPluginInit(int argc, const char *argv[])
   }
   lua_pop(l, 1);
 
-  lua_getglobal(l, TS_LUA_FUNCTION_G_SELECT_ALT);
-  if (lua_type(l, -1) == LUA_TFUNCTION) {
-    TSHttpHookAdd(TS_HTTP_SELECT_ALT_HOOK, global_contp);
-    TSDebug(TS_LUA_DEBUG_TAG, "select_alt_hook added");
-  }
-  lua_pop(l, 1);
-
   lua_getglobal(l, TS_LUA_FUNCTION_G_OS_DNS);
   if (lua_type(l, -1) == LUA_TFUNCTION) {
     TSHttpHookAdd(TS_HTTP_OS_DNS_HOOK, global_contp);
diff --git a/plugins/experimental/ts_lua/ts_lua_common.h b/plugins/experimental/ts_lua/ts_lua_common.h
index 12654b6..fa1baeb 100644
--- a/plugins/experimental/ts_lua/ts_lua_common.h
+++ b/plugins/experimental/ts_lua/ts_lua_common.h
@@ -44,7 +44,6 @@
 #define TS_LUA_FUNCTION_PRE_REMAP "do_pre_remap"
 #define TS_LUA_FUNCTION_POST_REMAP "do_post_remap"
 #define TS_LUA_FUNCTION_OS_DNS "do_os_dns"
-#define TS_LUA_FUNCTION_SELECT_ALT "do_select_alt"
 #define TS_LUA_FUNCTION_READ_CACHE "do_read_cache"
 #define TS_LUA_FUNCTION_TXN_CLOSE "do_txn_close"
 
@@ -57,7 +56,6 @@
 #define TS_LUA_FUNCTION_G_PRE_REMAP "do_global_pre_remap"
 #define TS_LUA_FUNCTION_G_POST_REMAP "do_global_post_remap"
 #define TS_LUA_FUNCTION_G_OS_DNS "do_global_os_dns"
-#define TS_LUA_FUNCTION_G_SELECT_ALT "do_global_select_alt"
 #define TS_LUA_FUNCTION_G_READ_CACHE "do_global_read_cache"
 #define TS_LUA_FUNCTION_G_TXN_CLOSE "do_global_txn_close"
 
diff --git a/plugins/experimental/ts_lua/ts_lua_hook.c b/plugins/experimental/ts_lua/ts_lua_hook.c
index 8a6e7b4..81e3298 100644
--- a/plugins/experimental/ts_lua/ts_lua_hook.c
+++ b/plugins/experimental/ts_lua/ts_lua_hook.c
@@ -31,7 +31,6 @@ typedef enum {
   TS_LUA_HOOK_PRE_REMAP,
   TS_LUA_HOOK_POST_REMAP,
   TS_LUA_HOOK_OS_DNS,
-  TS_LUA_HOOK_SELECT_ALT,
   TS_LUA_HOOK_READ_CACHE_HDR,
   TS_LUA_HOOK_TXN_CLOSE,
   TS_LUA_REQUEST_TRANSFORM,
@@ -49,7 +48,6 @@ char *ts_lua_hook_id_string[] = {"TS_LUA_HOOK_DUMMY",
                                  "TS_LUA_HOOK_PRE_REMAP",
                                  "TS_LUA_HOOK_POST_REMAP",
                                  "TS_LUA_HOOK_OS_DNS",
-                                 "TS_LUA_HOOK_SELECT_ALT",
                                  "TS_LUA_HOOK_READ_CACHE_HDR",
                                  "TS_LUA_HOOK_TXN_CLOSE",
                                  "TS_LUA_REQUEST_TRANSFORM",
@@ -205,18 +203,6 @@ ts_lua_add_hook(lua_State *L)
     }
     break;
 
-  case TS_LUA_HOOK_SELECT_ALT:
-    if (http_ctx) {
-      TSHttpTxnHookAdd(http_ctx->txnp, TS_HTTP_SELECT_ALT_HOOK, http_ctx->cinfo.contp);
-      http_ctx->has_hook = 1;
-      lua_pushvalue(L, 2);
-      lua_setglobal(L, TS_LUA_FUNCTION_SELECT_ALT);
-    } else {
-      lua_pushvalue(L, 2);
-      lua_setglobal(L, TS_LUA_FUNCTION_G_SELECT_ALT);
-    }
-    break;
-
   case TS_LUA_HOOK_READ_CACHE_HDR:
     if (http_ctx) {
       TSHttpTxnHookAdd(http_ctx->txnp, TS_HTTP_READ_CACHE_HDR_HOOK, http_ctx->cinfo.contp);
diff --git a/plugins/experimental/ts_lua/ts_lua_http_config.c b/plugins/experimental/ts_lua/ts_lua_http_config.c
index be4e88e..1a57d79 100644
--- a/plugins/experimental/ts_lua/ts_lua_http_config.c
+++ b/plugins/experimental/ts_lua/ts_lua_http_config.c
@@ -40,6 +40,7 @@ typedef enum {
   TS_LUA_CONFIG_HTTP_ANONYMIZE_INSERT_CLIENT_IP               = TS_CONFIG_HTTP_ANONYMIZE_INSERT_CLIENT_IP,
   TS_LUA_CONFIG_HTTP_RESPONSE_SERVER_ENABLED                  = TS_CONFIG_HTTP_RESPONSE_SERVER_ENABLED,
   TS_LUA_CONFIG_HTTP_INSERT_SQUID_X_FORWARDED_FOR             = TS_CONFIG_HTTP_INSERT_SQUID_X_FORWARDED_FOR,
+  TS_LUA_CONFIG_HTTP_INSERT_FORWARDED                         = TS_CONFIG_HTTP_INSERT_FORWARDED,
   TS_LUA_CONFIG_HTTP_SERVER_TCP_INIT_CWND                     = TS_CONFIG_HTTP_SERVER_TCP_INIT_CWND,
   TS_LUA_CONFIG_HTTP_SEND_HTTP11_REQUESTS                     = TS_CONFIG_HTTP_SEND_HTTP11_REQUESTS,
   TS_LUA_CONFIG_HTTP_CACHE_HTTP                               = TS_CONFIG_HTTP_CACHE_HTTP,
@@ -84,7 +85,7 @@ typedef enum {
   TS_LUA_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK              = TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK,
   TS_LUA_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER_MARK             = TS_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER_MARK,
   TS_LUA_CONFIG_HTTP_CACHE_RANGE_LOOKUP                       = TS_CONFIG_HTTP_CACHE_RANGE_LOOKUP,
-  TS_LUA_CONFIG_HTTP_NORMALIZE_AE_GZIP                        = TS_CONFIG_HTTP_NORMALIZE_AE_GZIP,
+  TS_LUA_CONFIG_HTTP_NORMALIZE_AE                             = TS_CONFIG_HTTP_NORMALIZE_AE,
   TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_SIZE                      = TS_CONFIG_HTTP_DEFAULT_BUFFER_SIZE,
   TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_WATER_MARK                = TS_CONFIG_HTTP_DEFAULT_BUFFER_WATER_MARK,
   TS_LUA_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE                  = TS_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE,
@@ -163,6 +164,7 @@ ts_lua_var_item ts_lua_http_config_vars[] = {
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_ANONYMIZE_INSERT_CLIENT_IP),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_RESPONSE_SERVER_ENABLED),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_INSERT_SQUID_X_FORWARDED_FOR),
+  TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_INSERT_FORWARDED),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_SERVER_TCP_INIT_CWND),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_SEND_HTTP11_REQUESTS),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CACHE_HTTP),
@@ -207,7 +209,7 @@ ts_lua_var_item ts_lua_http_config_vars[] = {
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER_MARK),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CACHE_RANGE_LOOKUP),
-  TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_NORMALIZE_AE_GZIP),
+  TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_NORMALIZE_AE),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_SIZE),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_DEFAULT_BUFFER_WATER_MARK),
   TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE),
diff --git a/plugins/experimental/ts_lua/ts_lua_http_milestone.c b/plugins/experimental/ts_lua/ts_lua_http_milestone.c
index 0c78601..6d826ea 100644
--- a/plugins/experimental/ts_lua/ts_lua_http_milestone.c
+++ b/plugins/experimental/ts_lua/ts_lua_http_milestone.c
@@ -40,7 +40,9 @@ typedef enum {
   TS_LUA_MILESTONE_SM_START                = TS_MILESTONE_SM_START,
   TS_LUA_MILESTONE_SM_FINISH               = TS_MILESTONE_SM_FINISH,
   TS_LUA_MILESTONE_PLUGIN_ACTIVE           = TS_MILESTONE_PLUGIN_ACTIVE,
-  TS_LUA_MILESTONE_PLUGIN_TOTAL            = TS_MILESTONE_PLUGIN_TOTAL
+  TS_LUA_MILESTONE_PLUGIN_TOTAL            = TS_MILESTONE_PLUGIN_TOTAL,
+  TS_LUA_MILESTONE_TLS_HANDSHAKE_START     = TS_MILESTONE_TLS_HANDSHAKE_START,
+  TS_LUA_MILESTONE_TLS_HANDSHAKE_END       = TS_MILESTONE_TLS_HANDSHAKE_END
 } TSLuaMilestoneType;
 
 ts_lua_var_item ts_lua_milestone_type_vars[] = {TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_UA_BEGIN),
@@ -64,7 +66,9 @@ ts_lua_var_item ts_lua_milestone_type_vars[] = {TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILE
                                                 TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_SM_START),
                                                 TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_SM_FINISH),
                                                 TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_PLUGIN_ACTIVE),
-                                                TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_PLUGIN_TOTAL)};
+                                                TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_PLUGIN_TOTAL),
+                                                TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_TLS_HANDSHAKE_START),
+                                                TS_LUA_MAKE_VAR_ITEM(TS_LUA_MILESTONE_TLS_HANDSHAKE_END)};
 
 static void ts_lua_inject_http_milestone_variables(lua_State *L);
 
diff --git a/plugins/experimental/ts_lua/ts_lua_log.c b/plugins/experimental/ts_lua/ts_lua_log.c
index 3837029..72a48d2 100644
--- a/plugins/experimental/ts_lua/ts_lua_log.c
+++ b/plugins/experimental/ts_lua/ts_lua_log.c
@@ -90,7 +90,7 @@ ts_lua_log_object_write(lua_State *L)
   if (log) {
     TSTextLogObjectWrite(log, (char *)text, NULL);
   } else {
-    TSError("[ts_lua][%s] log is not exsited!", __FUNCTION__);
+    TSError("[ts_lua][%s] log object does not exist for write", __FUNCTION__);
   }
 
   return 0;
diff --git a/plugins/experimental/ts_lua/ts_lua_server_request.c b/plugins/experimental/ts_lua/ts_lua_server_request.c
index d67f894..7ba6995 100644
--- a/plugins/experimental/ts_lua/ts_lua_server_request.c
+++ b/plugins/experimental/ts_lua/ts_lua_server_request.c
@@ -569,6 +569,8 @@ ts_lua_server_request_get_url_host(lua_State *L)
 
   GET_HTTP_CONTEXT(http_ctx, L);
 
+  TS_LUA_CHECK_SERVER_REQUEST_URL(http_ctx);
+
   host = TSUrlHostGet(http_ctx->server_request_bufp, http_ctx->server_request_url, &len);
 
   if (len == 0) {
@@ -624,6 +626,7 @@ ts_lua_server_request_get_url_scheme(lua_State *L)
   ts_lua_http_ctx *http_ctx;
 
   GET_HTTP_CONTEXT(http_ctx, L);
+  TS_LUA_CHECK_SERVER_REQUEST_URL(http_ctx);
 
   scheme = TSUrlSchemeGet(http_ctx->server_request_bufp, http_ctx->server_request_url, &len);
 
@@ -641,6 +644,7 @@ ts_lua_server_request_set_url_scheme(lua_State *L)
   ts_lua_http_ctx *http_ctx;
 
   GET_HTTP_CONTEXT(http_ctx, L);
+  TS_LUA_CHECK_SERVER_REQUEST_URL(http_ctx);
 
   scheme = luaL_checklstring(L, 1, &len);
 
diff --git a/plugins/experimental/ts_lua/ts_lua_server_response.c b/plugins/experimental/ts_lua/ts_lua_server_response.c
index d3e5338..107c9b4 100644
--- a/plugins/experimental/ts_lua/ts_lua_server_response.c
+++ b/plugins/experimental/ts_lua/ts_lua_server_response.c
@@ -291,7 +291,7 @@ ts_lua_server_response_set_status(lua_State *L)
 {
   int status;
   const char *reason;
-  int reason_len;
+  int reason_len = 0;
 
   ts_lua_http_ctx *http_ctx;
 
@@ -301,8 +301,10 @@ ts_lua_server_response_set_status(lua_State *L)
 
   status = luaL_checkint(L, 1);
 
-  reason     = TSHttpHdrReasonLookup(status);
-  reason_len = strlen(reason);
+  reason = TSHttpHdrReasonLookup(status);
+  if (reason) {
+    reason_len = strlen(reason);
+  }
 
   TSHttpHdrStatusSet(http_ctx->server_response_bufp, http_ctx->server_response_hdrp, status);
   TSHttpHdrReasonSet(http_ctx->server_response_bufp, http_ctx->server_response_hdrp, reason, reason_len);
diff --git a/plugins/experimental/ts_lua/ts_lua_util.c b/plugins/experimental/ts_lua/ts_lua_util.c
index 58dabdc..159c86c 100644
--- a/plugins/experimental/ts_lua/ts_lua_util.c
+++ b/plugins/experimental/ts_lua/ts_lua_util.c
@@ -762,15 +762,6 @@ ts_lua_http_cont_handler(TSCont contp, TSEvent ev, void *edata)
 
     break;
 
-  case TS_EVENT_HTTP_SELECT_ALT:
-
-    lua_getglobal(L, TS_LUA_FUNCTION_SELECT_ALT);
-    if (lua_type(L, -1) == LUA_TFUNCTION) {
-      ret = lua_resume(L, 0);
-    }
-
-    break;
-
   case TS_EVENT_HTTP_READ_CACHE_HDR:
 
     lua_getglobal(L, TS_LUA_FUNCTION_READ_CACHE);
diff --git a/plugins/header_rewrite/operators.cc b/plugins/header_rewrite/operators.cc
index 7462acc..dbda2a4 100644
--- a/plugins/header_rewrite/operators.cc
+++ b/plugins/header_rewrite/operators.cc
@@ -292,6 +292,64 @@ OperatorSetRedirect::initialize(Parser &p)
 }
 
 void
+EditRedirectResponse(TSHttpTxn txnp, std::string const &location, int const &size, TSHttpStatus status, TSMBuffer bufp,
+                     TSMLoc hdr_loc)
+{
+  // Set new location.
+  TSMLoc field_loc;
+  static std::string header("Location");
+  if (TS_SUCCESS == TSMimeHdrFieldCreateNamed(bufp, hdr_loc, header.c_str(), header.size(), &field_loc)) {
+    if (TS_SUCCESS == TSMimeHdrFieldValueStringSet(bufp, hdr_loc, field_loc, -1, location.c_str(), size)) {
+      TSDebug(PLUGIN_NAME, "   Adding header %s", header.c_str());
+      TSMimeHdrFieldAppend(bufp, hdr_loc, field_loc);
+    }
+    const char *reason = TSHttpHdrReasonLookup(status);
+    size_t len         = reason ? strlen(reason) : 0; /* TSHttpHdrReasonLookup() can return nullptr for unknown status codes */
+    TSHttpHdrReasonSet(bufp, hdr_loc, reason, len);
+    TSHandleMLocRelease(bufp, hdr_loc, field_loc);
+  }
+
+  // Set the body.
+  static std::string msg = "<HTML>\n<HEAD>\n<TITLE>Document Has Moved</TITLE>\n</HEAD>\n"
+                           "<BODY BGCOLOR=\"white\" FGCOLOR=\"black\">\n"
+                           "<H1>Document Has Moved</H1>\n<HR>\n<FONT FACE=\"Helvetica,Arial\"><B>\n"
+                           "Description: The document you requested has moved to a new location."
+                           " The new location is \"" +
+                           location + "\".\n</B></FONT>\n<HR>\n</BODY>\n";
+  TSHttpTxnErrorBodySet(txnp, TSstrdup(msg.c_str()), msg.length(), TSstrdup("text/html"));
+}
+
+static int
+cont_add_location(TSCont contp, TSEvent event, void *edata)
+{
+  TSHttpTxn txnp = static_cast<TSHttpTxn>(edata);
+
+  OperatorSetRedirect *osd = static_cast<OperatorSetRedirect *>(TSContDataGet(contp));
+  // Set the new status code and reason.
+  TSHttpStatus status = osd->get_status();
+  switch (event) {
+  case TS_EVENT_HTTP_SEND_RESPONSE_HDR: {
+    int size;
+    TSMBuffer bufp;
+    TSMLoc hdr_loc;
+    if (TSHttpTxnClientRespGet(txnp, &bufp, &hdr_loc) == TS_SUCCESS) {
+      EditRedirectResponse(txnp, osd->get_location(size), size, status, bufp, hdr_loc);
+    } else {
+      TSDebug(PLUGIN_NAME, "Could not retrieve the response header");
+    }
+
+  } break;
+
+  case TS_EVENT_HTTP_TXN_CLOSE:
+    TSContDestroy(contp);
+    break;
+  default:
+    break;
+  }
+  return 0;
+}
+
+void
 OperatorSetRedirect::exec(const Resources &res) const
 {
   if (res.bufp && res.hdr_loc && res.client_bufp && res.client_hdr_loc) {
@@ -361,34 +419,24 @@ OperatorSetRedirect::exec(const Resources &res) const
       const_cast<Resources &>(res).changed_url = true;
       res._rri->redirect                       = 1;
     } else {
-      // Set new location.
-      TSMLoc field_loc;
-      std::string header("Location");
-      if (TS_SUCCESS == TSMimeHdrFieldCreateNamed(res.bufp, res.hdr_loc, header.c_str(), header.size(), &field_loc)) {
-        if (TS_SUCCESS == TSMimeHdrFieldValueStringSet(res.bufp, res.hdr_loc, field_loc, -1, value.c_str(), value.size())) {
-          TSDebug(PLUGIN_NAME, "   Adding header %s", header.c_str());
-          TSMimeHdrFieldAppend(res.bufp, res.hdr_loc, field_loc);
-        }
-        TSHandleMLocRelease(res.bufp, res.hdr_loc, field_loc);
-      }
-
       // Set the new status code and reason.
       TSHttpStatus status = (TSHttpStatus)_status.get_int_value();
-      const char *reason  = TSHttpHdrReasonLookup(status);
-      size_t len          = strlen(reason);
+      switch (get_hook()) {
+      case TS_HTTP_PRE_REMAP_HOOK: {
+        TSHttpTxnSetHttpRetStatus(res.txnp, status);
+        TSCont contp = TSContCreate(cont_add_location, nullptr);
+        TSContDataSet(contp, const_cast<OperatorSetRedirect *>(this));
+        TSHttpTxnHookAdd(res.txnp, TS_HTTP_SEND_RESPONSE_HDR_HOOK, contp);
+        TSHttpTxnHookAdd(res.txnp, TS_HTTP_TXN_CLOSE_HOOK, contp);
+        TSHttpTxnReenable(res.txnp, TS_EVENT_HTTP_CONTINUE);
+        return;
+      } break;
+      default:
+        break;
+      }
       TSHttpHdrStatusSet(res.bufp, res.hdr_loc, status);
-      TSHttpHdrReasonSet(res.bufp, res.hdr_loc, reason, len);
-
-      // Set the body.
-      std::string msg = "<HTML>\n<HEAD>\n<TITLE>Document Has Moved</TITLE>\n</HEAD>\n"
-                        "<BODY BGCOLOR=\"white\" FGCOLOR=\"black\">\n"
-                        "<H1>Document Has Moved</H1>\n<HR>\n<FONT FACE=\"Helvetica,Arial\"><B>\n"
-                        "Description: The document you requested has moved to a new location."
-                        " The new location is \"" +
-                        value + "\".\n</B></FONT>\n<HR>\n</BODY>\n";
-      TSHttpTxnErrorBodySet(res.txnp, TSstrdup(msg.c_str()), msg.length(), TSstrdup("text/html"));
+      EditRedirectResponse(res.txnp, value, value.size(), status, res.bufp, res.hdr_loc);
     }
-
     TSDebug(PLUGIN_NAME, "OperatorSetRedirect::exec() invoked with destination=%s and status code=%d", value.c_str(),
             _status.get_int_value());
   }
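
The pre-remap branch above defers the actual response editing: exec() only records the status, then creates a continuation that carries the operator and fires on SEND_RESPONSE_HDR (to add the Location header, reason and body via EditRedirectResponse()) and on TXN_CLOSE (to destroy itself). A stripped-down sketch of that deferral pattern, independent of header_rewrite (handler and state names are illustrative):

    #include <ts/ts.h>

    static int
    deferred_handler(TSCont contp, TSEvent event, void *edata)
    {
      TSHttpTxn txnp = static_cast<TSHttpTxn>(edata);
      if (event == TS_EVENT_HTTP_SEND_RESPONSE_HDR) {
        /* edit the client response here, using TSContDataGet(contp) for the saved state */
      } else if (event == TS_EVENT_HTTP_TXN_CLOSE) {
        TSContDestroy(contp); /* the continuation is per transaction, so clean up here */
      }
      TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
      return 0;
    }

    static void
    schedule_deferred(TSHttpTxn txnp, void *state)
    {
      TSCont contp = TSContCreate(deferred_handler, nullptr);
      TSContDataSet(contp, state);
      TSHttpTxnHookAdd(txnp, TS_HTTP_SEND_RESPONSE_HDR_HOOK, contp);
      TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, contp);
    }
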
diff --git a/plugins/header_rewrite/operators.h b/plugins/header_rewrite/operators.h
index d7ec61f..a07caf0 100644
--- a/plugins/header_rewrite/operators.h
+++ b/plugins/header_rewrite/operators.h
@@ -110,6 +110,17 @@ class OperatorSetRedirect : public Operator
 public:
   OperatorSetRedirect() { TSDebug(PLUGIN_NAME_DBG, "Calling CTOR for OperatorSetRedirect"); }
   void initialize(Parser &p);
+  TSHttpStatus
+  get_status()
+  {
+    return static_cast<TSHttpStatus>(_status.get_int_value());
+  }
+  std::string
+  get_location(int &size)
+  {
+    size = (int)_location.size();
+    return static_cast<std::string>(_location.get_value());
+  }
 
 protected:
   void exec(const Resources &res) const;
diff --git a/plugins/s3_auth/Makefile.inc b/plugins/s3_auth/Makefile.inc
index 7865d5e..696aa5e 100644
--- a/plugins/s3_auth/Makefile.inc
+++ b/plugins/s3_auth/Makefile.inc
@@ -16,3 +16,11 @@
 
 pkglib_LTLIBRARIES += s3_auth/s3_auth.la
 s3_auth_s3_auth_la_SOURCES = s3_auth/s3_auth.cc s3_auth/aws_auth_v4.cc
+
+check_PROGRAMS +=  s3_auth/test_s3auth
+
+s3_auth_test_s3auth_CPPFLAGS = $(AM_CPPFLAGS) -I$(abs_top_srcdir)/tests/include -DAWS_AUTH_V4_UNIT_TEST
+s3_auth_test_s3auth_LDADD = $(OPENSSL_LIBS)
+s3_auth_test_s3auth_SOURCES = \
+    s3_auth/unit-tests/test_aws_auth_v4.cc \
+    s3_auth/aws_auth_v4.cc
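
Since the test source defines CATCH_CONFIG_MAIN (see test_aws_auth_v4.cc below), test_s3auth is a self-contained Catch binary. A plausible way to exercise it from the build tree (exact paths depend on the build layout):

    make check                              # builds the check programs, including s3_auth/test_s3auth
    ./plugins/s3_auth/test_s3auth "[AWS]"   # run only the test cases tagged [AWS]
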
diff --git a/plugins/s3_auth/aws_auth_v4.cc b/plugins/s3_auth/aws_auth_v4.cc
index 386bf69..5c3cd66 100644
--- a/plugins/s3_auth/aws_auth_v4.cc
+++ b/plugins/s3_auth/aws_auth_v4.cc
@@ -23,6 +23,7 @@
  */
 
 #include <cstring>        /* strlen() */
+#include <string>         /* stoi() */
 #include <ctime>          /* strftime(), time(), gmtime_r() */
 #include <iomanip>        /* std::setw */
 #include <sstream>        /* std::stringstream */
@@ -69,12 +70,15 @@ base16Encode(const char *in, size_t inLen)
  *
  * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
  *
+ * @todo Consider reusing / converting to TSStringPercentEncode() using a custom map to account for the AWS specific rules.
+ *       Currently we don't build a library/archive that we could link with the unit-test binary. Also, using
+ *       different sets of encode/decode functions at runtime and in the unit tests did not seem like a good idea.
  * @param in string to be URI encoded
  * @param isObjectName if true don't encode '/', keep it as it is.
  * @return encoded string.
  */
 String
-uriEncode(const String in, bool isObjectName)
+uriEncode(const String &in, bool isObjectName)
 {
   std::stringstream result;
 
@@ -99,6 +103,35 @@ uriEncode(const String in, bool isObjectName)
 }
 
 /**
+ * @brief URI-decode a character string (AWS specific version, see spec)
+ *
+ * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ *
+ * @todo Consider reusing / converting to TSStringPercentDecode()
+ *       Currently we don't build a library/archive that we could link with the unit-test binary. Also, using
+ *       different sets of encode/decode functions at runtime and in the unit tests did not seem like a good idea.
+ * @param in string to be URI decoded
+ * @return decoded string.
+ */
+String
+uriDecode(const String &in)
+{
+  std::string result;
+  result.reserve(in.length());
+  size_t i = 0;
+  while (i < in.length()) {
+    if (in[i] == '%') {
+      result += static_cast<char>(std::stoi(in.substr(i + 1, 2), nullptr, 16));
+      i += 3;
+    } else {
+      result += in[i];
+      i++;
+    }
+  }
+  return result;
+}
+
+/**
  * @brief trim the white-space character from the beginning and the end of the string ("in-place", just moving pointers around)
  *
  * @param in ptr to an input string
@@ -256,7 +289,18 @@ getCanonicalRequestSha256Hash(TsInterface &api, bool signPayload, const StringSe
     String encodedParam = uriEncode(param, /* isObjectName */ false);
 
     paramNames.insert(encodedParam);
-    paramsMap[encodedParam] = uriEncode(value, /* isObjectName */ false);
+
+    /* Look for '%' first, trying to avoid as many uri-decode calls as possible.
+     * It is hard to estimate which use-case is more likely - (1) URIs with uri-encoded query parameter
+     * values or (2) unencoded ones - and that determines how much this optimization helps. */
+    if (nullptr == memchr(value.c_str(), '%', value.length()) || 0 == uriDecode(value).compare(value)) {
+      /* Not URI-encoded */
+      paramsMap[encodedParam] = uriEncode(value, /* isObjectName */ false);
+    } else {
+      /* Already URI-encoded, so don't encode again; AWS does not double-encode here, although the spec
+       * does not mention this (asked AWS, still waiting for confirmation) */
+      paramsMap[encodedParam] = value;
+    }
   }
 
   String queryStr;
@@ -426,6 +470,7 @@ createDefaultExcludeHeaders()
   StringSet m;
   /* exclude headers that are meant to be changed */
   m.insert("x-forwarded-for");
+  m.insert("forwarded");
   m.insert("via");
   return m;
 }
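
The query-parameter loop above therefore encodes a value only when it does not already look percent-encoded: the cheap memchr() probe for '%' runs first, and only then the more expensive uriDecode()/compare round trip. A small self-contained sketch of that decision, written against the uriEncode()/uriDecode() helpers declared by this plugin (canonicalQueryValue() is a hypothetical name; String is the plugin's alias for std::string):

    #include <cstring> /* memchr() */

    /* Encode the value only if it is not already percent-encoded, so query
     * parameters never end up double-encoded in the canonical request. */
    String
    canonicalQueryValue(const String &value)
    {
      if (nullptr == memchr(value.c_str(), '%', value.length()) || uriDecode(value) == value) {
        return uriEncode(value, /* isObjectName */ false); /* plain value: encode it */
      }
      return value; /* already encoded: keep it as-is, matching AWS behavior */
    }
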
diff --git a/plugins/s3_auth/aws_auth_v4.h b/plugins/s3_auth/aws_auth_v4.h
index 1959ddf..94d2a52 100644
--- a/plugins/s3_auth/aws_auth_v4.h
+++ b/plugins/s3_auth/aws_auth_v4.h
@@ -52,106 +52,11 @@ public:
   virtual HeaderIterator headerEnd()         = 0;
 };
 
-/* Define a header iterator to be used in the plugin using ATS API */
-class HeaderIterator
-{
-public:
-  HeaderIterator() : _bufp(nullptr), _hdrs(TS_NULL_MLOC), _field(TS_NULL_MLOC) {}
-  HeaderIterator(TSMBuffer bufp, TSMLoc hdrs, TSMLoc field) : _bufp(bufp), _hdrs(hdrs), _field(field) {}
-  HeaderIterator(const HeaderIterator &it)
-  {
-    _bufp  = it._bufp;
-    _hdrs  = it._hdrs;
-    _field = it._field;
-  }
-  ~HeaderIterator() {}
-  HeaderIterator &
-  operator=(HeaderIterator &it)
-  {
-    _bufp  = it._bufp;
-    _hdrs  = it._hdrs;
-    _field = it._field;
-    return *this;
-  }
-  HeaderIterator &operator++()
-  {
-    /* @todo this is said to be slow in the API call comments, do something better here */
-    TSMLoc next = TSMimeHdrFieldNext(_bufp, _hdrs, _field);
-    TSHandleMLocRelease(_bufp, _hdrs, _field);
-    _field = next;
-    return *this;
-  }
-  HeaderIterator operator++(int)
-  {
-    HeaderIterator tmp(*this);
-    operator++();
-    return tmp;
-  }
-  bool
-  operator!=(const HeaderIterator &it)
-  {
-    return _bufp != it._bufp || _hdrs != it._hdrs || _field != it._field;
-  }
-  bool
-  operator==(const HeaderIterator &it)
-  {
-    return _bufp == it._bufp && _hdrs == it._hdrs && _field == it._field;
-  }
-  const char *
-  getName(int *len)
-  {
-    return TSMimeHdrFieldNameGet(_bufp, _hdrs, _field, len);
-  }
-  const char *
-  getValue(int *len)
-  {
-    return TSMimeHdrFieldValueStringGet(_bufp, _hdrs, _field, -1, len);
-  }
-  TSMBuffer _bufp;
-  TSMLoc _hdrs;
-  TSMLoc _field;
-};
-
-/* Define a API to be used in the plugin using ATS API */
-class TsApi : public TsInterface
-{
-public:
-  TsApi(TSMBuffer bufp, TSMLoc hdrs, TSMLoc url) : _bufp(bufp), _hdrs(hdrs), _url(url) {}
-  ~TsApi() {}
-  const char *
-  getMethod(int *len)
-  {
-    return TSHttpHdrMethodGet(_bufp, _hdrs, len);
-  }
-  const char *
-  getHost(int *len)
-  {
-    return TSHttpHdrHostGet(_bufp, _hdrs, len);
-  }
-  const char *
-  getPath(int *len)
-  {
-    return TSUrlPathGet(_bufp, _url, len);
-  }
-  const char *
-  getQuery(int *len)
-  {
-    return TSUrlHttpQueryGet(_bufp, _url, len);
-  }
-  HeaderIterator
-  headerBegin()
-  {
-    return HeaderIterator(_bufp, _hdrs, TSMimeHdrFieldGet(_bufp, _hdrs, 0));
-  }
-  HeaderIterator
-  headerEnd()
-  {
-    return HeaderIterator(_bufp, _hdrs, TS_NULL_MLOC);
-  }
-  TSMBuffer _bufp;
-  TSMLoc _hdrs;
-  TSMLoc _url;
-};
+#ifdef AWS_AUTH_V4_UNIT_TEST
+#include "unit-tests/test_aws_auth_v4.h"
+#else
+#include "aws_auth_v4_wrap.h"
+#endif
 
 /* S3 auth v4 utility API */
 
diff --git a/plugins/s3_auth/aws_auth_v4.h b/plugins/s3_auth/aws_auth_v4_wrap.h
similarity index 53%
copy from plugins/s3_auth/aws_auth_v4.h
copy to plugins/s3_auth/aws_auth_v4_wrap.h
index 1959ddf..b14f9c6 100644
--- a/plugins/s3_auth/aws_auth_v4.h
+++ b/plugins/s3_auth/aws_auth_v4_wrap.h
@@ -17,40 +17,13 @@
 */
 
 /**
- * @file aws_auth_v4.h
- * @brief AWS Auth v4 signing utility.
- * @see aws_auth_v4.cc
+ * @file aws_auth_v4_wrap.h
+ * @brief TS API adaptor and header iterator built on the TS API; these are swapped with mocks during unit testing.
+ * @see aws_auth_v4.h
  */
 
-#ifndef PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_
-#define PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_
-
-#include <algorithm> /* transform() */
-#include <cstddef>   /* soze_t */
-#include <string>    /* std::string */
-#include <sstream>   /* std::stringstream */
-#include <map>       /* std::map */
-#include <set>       /* std::set */
-
-#include <ts/ts.h>
-
-typedef std::string String;
-typedef std::set<std::string> StringSet;
-typedef std::map<std::string, std::string> StringMap;
-
-class HeaderIterator;
-
-class TsInterface
-{
-public:
-  virtual ~TsInterface(){};
-  virtual const char *getMethod(int *length) = 0;
-  virtual const char *getHost(int *length)   = 0;
-  virtual const char *getPath(int *length)   = 0;
-  virtual const char *getQuery(int *length)  = 0;
-  virtual HeaderIterator headerBegin()       = 0;
-  virtual HeaderIterator headerEnd()         = 0;
-};
+#ifndef PLUGINS_S3_AUTH_AWS_AUTH_V4_WRAP_H_
+#define PLUGINS_S3_AUTH_AWS_AUTH_V4_WRAP_H_
 
 /* Define a header iterator to be used in the plugin using ATS API */
 class HeaderIterator
@@ -153,55 +126,4 @@ public:
   TSMLoc _url;
 };
 
-/* S3 auth v4 utility API */
-
-static const String X_AMZ_CONTENT_SHA256 = "x-amz-content-sha256";
-static const String X_AMX_DATE           = "x-amz-date";
-static const String X_AMZ                = "x-amz-";
-static const String CONTENT_TYPE         = "content-type";
-static const String HOST                 = "host";
-
-String trimWhiteSpaces(const String &s);
-
-template <typename ContainerType>
-void
-commaSeparateString(ContainerType &ss, const String &input, bool trim = true, bool lowerCase = true)
-{
-  std::istringstream istr(input);
-  String token;
-
-  while (std::getline(istr, token, ',')) {
-    token = trim ? trimWhiteSpaces(token) : token;
-    if (lowerCase) {
-      std::transform(token.begin(), token.end(), token.begin(), ::tolower);
-    }
-    ss.insert(ss.end(), token);
-  }
-}
-
-class AwsAuthV4
-{
-public:
-  AwsAuthV4(TsInterface &api, time_t *now, bool signPayload, const char *awsAccessKeyId, size_t awsAccessKeyIdLen,
-            const char *awsSecretAccessKey, size_t awsSecretAccessKeyLen, const char *awsService, size_t awsServiceLen,
-            const StringSet &includedHeaders, const StringSet &excludedHeaders, const StringMap &regionMap);
-  const char *getDateTime(size_t *dateTimeLen);
-  String getPayloadHash();
-  String getAuthorizationHeader();
-
-private:
-  TsInterface &_api;
-  char _dateTime[sizeof "20170428T010203Z"];
-  bool _signPayload               = false;
-  const char *_awsAccessKeyId     = nullptr;
-  size_t _awsAccessKeyIdLen       = 0;
-  const char *_awsSecretAccessKey = nullptr;
-  size_t _awsSecretAccessKeyLen   = 0;
-  const char *_awsService         = nullptr;
-  size_t _awsServiceLen           = 0;
-
-  const StringSet &_includedHeaders;
-  const StringSet &_excludedHeaders;
-  const StringMap &_regionMap;
-};
-#endif /* PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_ */
+#endif /* PLUGINS_S3_AUTH_AWS_AUTH_V4_WRAP_H_ */
diff --git a/plugins/s3_auth/s3_auth.cc b/plugins/s3_auth/s3_auth.cc
index 8213ad6..97e7d8d 100644
--- a/plugins/s3_auth/s3_auth.cc
+++ b/plugins/s3_auth/s3_auth.cc
@@ -364,6 +364,7 @@ public:
 
     /* Exclude headers that are meant to be changed */
     _v4excludeHeaders.insert("x-forwarded-for");
+    _v4excludeHeaders.insert("forwarded");
     _v4excludeHeaders.insert("via");
   }
 
@@ -646,7 +647,7 @@ TSHttpStatus
 S3Request::authorizeV4(S3Config *s3)
 {
   TsApi api(_bufp, _hdr_loc, _url_loc);
-  time_t now = time(0);
+  time_t now = time(nullptr);
 
   AwsAuthV4 util(api, &now, /* signPayload */ false, s3->keyid(), s3->keyid_len(), s3->secret(), s3->secret_len(), "s3", 2,
                  s3->v4includeHeaders(), s3->v4excludeHeaders(), s3->v4RegionMap());
diff --git a/plugins/s3_auth/unit-tests/test_aws_auth_v4.cc b/plugins/s3_auth/unit-tests/test_aws_auth_v4.cc
new file mode 100644
index 0000000..a75b6bf
--- /dev/null
+++ b/plugins/s3_auth/unit-tests/test_aws_auth_v4.cc
@@ -0,0 +1,953 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+/**
+ * @file aws_auth_v4_test.cc
+ * @brief Unit tests for functions implementing S3 auth version 4
+ */
+
+#include <string.h>
+#include <openssl/hmac.h>   /* EVP_MAX_MD_SIZE */
+#define CATCH_CONFIG_MAIN   /* include main function */
+#include <catch.hpp>        /* catch unit-test framework */
+#include "../aws_auth_v4.h" /* S3 auth v4 utility */
+
+/* uriEncode() ***************************************************************************************************************** */
+
+TEST_CASE("uriEncode(): encode empty input", "[AWS][auth][utility]")
+{
+  String in("");
+  String encoded = uriEncode(in, /* isObjectName */ false);
+  CHECK(0 == encoded.length()); /* nothing encoded because the input is empty */
+}
+
+TEST_CASE("uriEncode(): encode unreserved chars", "[s3_auth]")
+{
+  const String in = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                    "abcdefghijklmnopqrstuvwxyz"
+                    "0123456789"
+                    "-._~";
+  String encoded = uriEncode(in, /* isObjectName */ false);
+
+  CHECK(in.length() == encoded.length());
+  CHECK_FALSE(encoded.compare("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                              "abcdefghijklmnopqrstuvwxyz"
+                              "0123456789"
+                              "-._~"));
+}
+
+TEST_CASE("uriEncode(): encode reserved chars in a name which is not object name", "[AWS][auth][utility]")
+{
+  const String in = " /!\"#$%&'()*+,:;<=>?@[\\]^`{|}"; /* some printable but reserved chars */
+  String encoded  = uriEncode(in, /* isObjectName */ false);
+
+  CHECK(3 * in.length() == encoded.length()); /* size of "%NN" = 3 */
+  CHECK_FALSE(encoded.compare("%20%2F%21%22%23%24%25%26%27%28%29%2A%2B%2C%3A%3B%3C%3D%3E%3F%40%5B%5C%5D%5E%60%7B%7C%7D"));
+}
+
+TEST_CASE("uriEncode(): encode reserved chars in an object name", "[AWS][auth][utility]")
+{
+  const String in = " /!\"#$%&'()*+,:;<=>?@[\\]^`{|}"; /* some printable but reserved chars */
+  String encoded  = uriEncode(in, /* isObjectName */ true);
+
+  CHECK(3 * in.length() - 2 == encoded.length()); /* size of "%NN" = 3, '/' is not encoded */
+  CHECK_FALSE(encoded.compare("%20/%21%22%23%24%25%26%27%28%29%2A%2B%2C%3A%3B%3C%3D%3E%3F%40%5B%5C%5D%5E%60%7B%7C%7D"));
+}
+
+TEST_CASE("uriDecode(): decode empty input", "[AWS][auth][utility]")
+{
+  String encoded("");
+  String decoded = uriDecode(encoded);
+  CHECK(0 == decoded.length()); /* nothing decoded because the input is empty */
+}
+
+TEST_CASE("uriDecode(): decode unreserved chars", "[AWS][auth][utility]")
+{
+  const String encoded = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                         "abcdefghijklmnopqrstuvwxyz"
+                         "0123456789"
+                         "-._~";
+  String decoded = uriDecode(encoded);
+
+  CHECK(decoded.length() == encoded.length());
+  CHECK_FALSE(decoded.compare("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                              "abcdefghijklmnopqrstuvwxyz"
+                              "0123456789"
+                              "-._~"));
+}
+
+TEST_CASE("uriDecode(): decode reserved chars", "[AWS][auth][utility]")
+{
+  const String encoded =
+    "%20%2F%21%22%23%24%25%26%27%28%29%2A%2B%2C%3A%3B%3C%3D%3E%3F%40%5B%5C%5D%5E%60%7B%7C%7D"; /* some printable but
+                                                                                                  reserved chars */
+  String decoded = uriDecode(encoded);
+
+  CHECK(3 * decoded.length() == encoded.length()); /* size of "%NN" = 3 */
+  CHECK_FALSE(decoded.compare(" /!\"#$%&'()*+,:;<=>?@[\\]^`{|}"));
+}
+
+/* base16Encode() ************************************************************************************************************** */
+
+TEST_CASE("base16Encode(): base16 encode empty string", "[utility]")
+{
+  const char *in = nullptr;
+  size_t inLen   = 0;
+  String encoded = base16Encode(in, inLen);
+
+  CHECK(0 == encoded.length());
+}
+
+TEST_CASE("base16Encode(): base16 encode RFC4648 test vectors", "[utility]")
+{
+  /* use the test vectors from RFC4648: https://tools.ietf.org/html/rfc4648#section-10 (just convert to lower case) */
+  const char *bench[] = {"",       "",     "f",        "66",    "fo",         "666f",   "foo",
+                         "666f6f", "foob", "666f6f62", "fooba", "666f6f6261", "foobar", "666f6f626172"};
+
+  for (size_t i = 0; i < sizeof(bench) / sizeof(char *); i += 2) {
+    const char *in = bench[i];
+    size_t inLen   = strlen(in);
+    String encoded = base16Encode(in, inLen);
+
+    CHECK(inLen * 2 == encoded.length());
+    CHECK_FALSE(encoded.compare(bench[i + 1]));
+  }
+}
+
+/* trimWhiteSpaces() ******************************************************************************************************** */
+
+TEST_CASE("trimWhiteSpaces(): trim invalid arguments, check pointers", "[utility]")
+{
+  const char *in = nullptr;
+  size_t inLen   = 0;
+  size_t outLen  = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, outLen);
+
+  CHECK(in == start);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim empty input, check pointers", "[utility]")
+{
+  const char *in = "";
+  size_t inLen   = 0;
+  size_t outLen  = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, outLen);
+
+  CHECK(in == start);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim nothing to trim, check pointers", "[utility]")
+{
+  const char in[] = "Important Message";
+  size_t inLen    = strlen(in);
+  size_t newLen   = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, newLen);
+
+  CHECK(in == start);
+  CHECK(inLen == newLen);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim beginning, check pointers", "[utility]")
+{
+  const char in[] = " \t\nImportant Message";
+  size_t inLen    = strlen(in);
+  size_t newLen   = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, newLen);
+
+  CHECK(in + 3 == start);
+  CHECK(inLen - 3 == newLen);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim end, check pointers", "[utility]")
+{
+  const char in[] = "Important Message \t\n";
+  size_t inLen    = strlen(in);
+  size_t newLen   = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, newLen);
+
+  CHECK(in == start);
+  CHECK(inLen - 3 == newLen);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim both ends, check pointers", "[utility]")
+{
+  const char in[] = "\v\t\n Important Message \t\n";
+  size_t inLen    = strlen(in);
+  size_t newLen   = 0;
+
+  const char *start = trimWhiteSpaces(in, inLen, newLen);
+
+  CHECK(in + 4 == start);
+  CHECK(inLen - 7 == newLen);
+}
+
+TEST_CASE("trimWhiteSpaces(): trim both, check string", "[utility]")
+{
+  String in      = "\v\t\n Important Message \t\n";
+  String trimmed = trimWhiteSpaces(in);
+
+  CHECK_FALSE(trimmed.compare("Important Message"));
+  CHECK(in.length() - 7 == trimmed.length());
+}
+
+TEST_CASE("trimWhiteSpaces(): trim right, check string", "[utility]")
+{
+  String in      = "Important Message \t\n";
+  String trimmed = trimWhiteSpaces(in);
+
+  CHECK_FALSE(trimmed.compare("Important Message"));
+  CHECK(in.length() - 3 == trimmed.length());
+}
+
+TEST_CASE("trimWhiteSpaces(): trim left, check string", "[utility]")
+{
+  String in      = "\v\t\n Important Message";
+  String trimmed = trimWhiteSpaces(in);
+
+  CHECK_FALSE(trimmed.compare("Important Message"));
+  CHECK(in.length() - 4 == trimmed.length());
+}
+
+TEST_CASE("trimWhiteSpaces(): trim empty, check string", "[utility]")
+{
+  String in      = "\v\t\n  \t\n";
+  String trimmed = trimWhiteSpaces(in);
+
+  CHECK(trimmed.empty());
+  CHECK(0 == trimmed.length());
+}
+
+/* AWS Regions ***************************************************************************************************** */
+
+TEST_CASE("AWSRegions: get region empty input", "[AWS][auth][utility]")
+{
+  const char *host = "";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("us-east-1"));
+}
+
+TEST_CASE("AWSRegions: get region by providing no bucket name", "[AWS][auth][utility]")
+{
+  const char *host = "s3.eu-west-2.amazonaws.com";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("eu-west-2"));
+}
+
+TEST_CASE("AWSRegions: get region by providing bucket name having single label", "[AWS][auth][utility]")
+{
+  const char *host = "label1.label2.s3.eu-west-2.amazonaws.com";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("eu-west-2"));
+}
+
+TEST_CASE("AWSRegions: get region by providing bucket name having multiple labels", "[AWS][auth][utility]")
+{
+  const char *host = "label1.label2.s3.eu-west-2.amazonaws.com";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("eu-west-2"));
+}
+
+TEST_CASE("AWSRegions: get region by providing bucket name having single label not matching any entry point",
+          "[AWS][auth][utility]")
+{
+  const char *host = "THIS_NEVER_MATCHES.eu-west-2.amazonaws.com";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("us-east-1"));
+}
+
+TEST_CASE("AWSRegions: get region by providing bucket name having multiple labels not matching any entry point",
+          "[AWS][auth][utility]")
+{
+  const char *host = "label1.label2.THIS_NEVER_MATCHES.eu-west-2.amazonaws.com";
+  String s         = getRegion(defaultDefaultRegionMap, host, strlen(host));
+  CHECK_FALSE(s.compare("us-east-1"));
+}
+
+/* AWS spec tests/example ****************************************************************************************** */
+
+/* Test from docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * User id, secret and time */
+const char *awsSecretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
+const char *awsAccessKeyId     = "AKIAIOSFODNN7EXAMPLE";
+const char *awsService         = "s3";
+
+void
+ValidateBench(TsInterface &api, bool signPayload, time_t *now, const char *bench[], const StringSet &includedHeaders,
+              const StringSet &excludedHeaders)
+{
+  /* Test the main entry point for calculation of the Authorization header content */
+  AwsAuthV4 util(api, now, signPayload, awsAccessKeyId, strlen(awsAccessKeyId), awsSecretAccessKey, strlen(awsSecretAccessKey),
+                 awsService, strlen(awsService), includedHeaders, excludedHeaders, defaultDefaultRegionMap);
+  String authorizationHeader = util.getAuthorizationHeader();
+  CAPTURE(authorizationHeader);
+  CHECK_FALSE(authorizationHeader.compare(bench[0]));
+
+  /* Test payload hash */
+  String payloadHash = util.getPayloadHash();
+  CAPTURE(payloadHash);
+  CHECK_FALSE(payloadHash.compare(bench[5]));
+
+  /* Test the date time header content */
+  size_t dateLen   = 0;
+  const char *date = util.getDateTime(&dateLen);
+  CAPTURE(String(date, dateLen));
+  CHECK_FALSE(String(date, dateLen).compare(bench[2]));
+
+  /* Now test particular test points to pinpoint problems easier in case of regression */
+
+  /* test the canonization of the request */
+  String signedHeaders;
+  String canonicalReq = getCanonicalRequestSha256Hash(api, signPayload, includedHeaders, excludedHeaders, signedHeaders);
+  CAPTURE(canonicalReq);
+  CHECK_FALSE(canonicalReq.compare(bench[1]));
+  CAPTURE(signedHeaders);
+  CHECK_FALSE(signedHeaders.compare(bench[6]));
+
+  /* Test the formatting of the date and time */
+  char dateTime[sizeof("20170428T010203Z")];
+  size_t dateTimeLen = getIso8601Time(now, dateTime, sizeof(dateTime));
+  CAPTURE(String(dateTime, dateTimeLen));
+  CHECK_FALSE(String(dateTime, dateTimeLen).compare(bench[2]));
+
+  /* Test the region name */
+  int hostLen      = 0;
+  const char *host = api.getHost(&hostLen);
+  String awsRegion = getRegion(defaultDefaultRegionMap, host, hostLen);
+
+  /* Test string to sign calculation */
+  String stringToSign = getStringToSign(host, hostLen, dateTime, dateTimeLen, awsRegion.c_str(), awsRegion.length(), awsService,
+                                        strlen(awsService), canonicalReq.c_str(), canonicalReq.length());
+  CAPTURE(stringToSign);
+  CHECK_FALSE(stringToSign.compare(bench[3]));
+
+  /* Test the signature calculation */
+  char signature[EVP_MAX_MD_SIZE];
+  size_t signatureLen =
+    getSignature(awsSecretAccessKey, strlen(awsSecretAccessKey), awsRegion.c_str(), awsRegion.length(), awsService,
+                 strlen(awsService), dateTime, 8, stringToSign.c_str(), stringToSign.length(), signature, EVP_MAX_MD_SIZE);
+  String base16Signature = base16Encode(signature, signatureLen);
+  CAPTURE(base16Signature);
+  CHECK_FALSE(base16Signature.compare(bench[4]));
+}
+
+/**
+ * Test from docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * Example: GET Object
+ */
+TEST_CASE("AWSAuthSpecByExample: GET Object", "[AWS][auth][SpecByExample]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("test.txt");
+  api._query.assign("");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Range"]                = "bytes=0-9";
+  api._headers["x-amz-content-sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;range;x-amz-content-sha256;x-amz-date,"
+    "Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41",
+    /* Canonical Request sha256 */
+    "7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "7344ae5b7ee6c3e7e6b0fe0640412a37625d1fbfff95c48bbb2dc43964946972",
+    /* Signature */
+    "f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41",
+    /* Payload hash */
+    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+    /* Signed Headers */
+    "host;range;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ true, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/**
+ * Test from docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * Example: GET Bucket Lifecycle
+ */
+TEST_CASE("AWSAuthSpecByExample: GET Bucket Lifecycle", "[AWS][auth][SpecByExample]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("lifecycle");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
+    "Signature=fea454ca298b7da1c68078a5d1bdbfbbe0d65c699e0f91ac7a200a0136783543",
+    /* Canonical Request sha256 */
+    "9766c798316ff2757b517bc739a67f6213b4ab36dd5da2f94eaebf79c77395ca",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "9766c798316ff2757b517bc739a67f6213b4ab36dd5da2f94eaebf79c77395ca",
+    /* Signature */
+    "fea454ca298b7da1c68078a5d1bdbfbbe0d65c699e0f91ac7a200a0136783543",
+    /* Payload hash */
+    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ true, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/**
+ * Test from docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * Example: Get Bucket (List Objects)
+ */
+TEST_CASE("AWSAuthSpecByExample: Get Bucket List Objects", "[AWS][auth][SpecByExample]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
+    "Signature=34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7",
+    /* Canonical Request sha256 */
+    "df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7",
+    /* Signature */
+    "34b48302e7b5fa45bde8084f4b7868a86f0a534bc59db6670ed5711ef69dc6f7",
+    /* Payload hash */
+    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ true, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/**
+ * Test based on docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * but this time don't sign the payload, to test the "UNSIGNED-PAYLOAD" feature.
+ * Example: Get Bucket (List Objects)
+ */
+TEST_CASE("AWSAuthSpecByExample: GET Bucket List Objects, unsigned payload", "[AWS][auth][SpecByExample]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
+    "Signature=b1a076428fa68c2c42202ee5a5718b8207f725e451e2157d6b1c393e01fc2e68",
+    /* Canonical Request sha256 */
+    "528623330c85041d6fb82795b6f8d5771825d3568b9f0bc1faa8a49e1f5f9cfc",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "528623330c85041d6fb82795b6f8d5771825d3568b9f0bc1faa8a49e1f5f9cfc",
+    /* Signature */
+    "b1a076428fa68c2c42202ee5a5718b8207f725e451e2157d6b1c393e01fc2e68",
+    /* Payload hash */
+    "UNSIGNED-PAYLOAD",
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ false, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/**
+ * Test based on docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * but this time don't sign the payload, to test the "UNSIGNED-PAYLOAD" feature, and
+ * add extra headers that should be excluded from the signature (internal and changing headers).
+ * Example: Get Bucket (List Objects)
+ */
+TEST_CASE("AWSAuthSpecByExample: GET Bucket List Objects, unsigned payload, exclude internal and changing headers", "[AWS][auth]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["@internal"]            = "internal value";
+  api._headers["x-forwarded-for"]      = "192.168.7.1";
+  api._headers["via"] = "http/1.1 tcp ipv4 ats_dev[7e67ac60-c204-450d-90be-a426dd3b569f] (ApacheTrafficServer/7.2.0)";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
+    "Signature=b1a076428fa68c2c42202ee5a5718b8207f725e451e2157d6b1c393e01fc2e68",
+    /* Canonical Request sha256 */
+    "528623330c85041d6fb82795b6f8d5771825d3568b9f0bc1faa8a49e1f5f9cfc",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "528623330c85041d6fb82795b6f8d5771825d3568b9f0bc1faa8a49e1f5f9cfc",
+    /* Signature */
+    "b1a076428fa68c2c42202ee5a5718b8207f725e451e2157d6b1c393e01fc2e68",
+    /* Payload hash */
+    "UNSIGNED-PAYLOAD",
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ false, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/**
+ * Test based on docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+ * but this time the query param value is already URI-encoded; it must not be encoded twice,
+ * matching actual AWS behavior that is not documented in the specification.
+ */
+TEST_CASE("AWSAuthSpecByExample: GET Bucket List Objects, query param value already URI encoded", "[AWS][auth]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("key=TEST==");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  const char *bench[] = {
+    /* Authorization Header */
+    "AWS4-HMAC-SHA256 "
+    "Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,"
+    "SignedHeaders=host;x-amz-content-sha256;x-amz-date,"
+    "Signature=60b410f6a0ffe09b91c2aef1f179945916b45ea215278e6b8f6cfb8d461e3706",
+    /* Canonical Request sha256 */
+    "1035b1d75dad9e94fa99fa6edc2cf7d489f38796109a132721621977737a41cc",
+    /* Date and time*/
+    "20130524T000000Z",
+    /* String to sign */
+    "AWS4-HMAC-SHA256\n"
+    "20130524T000000Z\n"
+    "20130524/us-east-1/s3/aws4_request\n"
+    "1035b1d75dad9e94fa99fa6edc2cf7d489f38796109a132721621977737a41cc",
+    /* Signature */
+    "60b410f6a0ffe09b91c2aef1f179945916b45ea215278e6b8f6cfb8d461e3706",
+    /* Payload hash */
+    "UNSIGNED-PAYLOAD",
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+  };
+
+  ValidateBench(api, /* signPayload */ false, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+
+  /* Now pass the query param value already URI-encoded and expect the same result;
+   * it must not be encoded twice, matching actual AWS behavior that is not documented in the specification. */
+  api._query.assign("key=TEST%3D%3D");
+  ValidateBench(api, /* signPayload */ false, &now, bench, defaultIncludeHeaders, defaultExcludeHeaders);
+}
+
+/* Utility parameters related tests ******************************************************************************** */
+
+void
+ValidateBenchCanonicalRequest(TsInterface &api, bool signPayload, time_t *now, const char *bench[],
+                              const StringSet &includedHeaders, const StringSet &excludedHeaders)
+{
+  /* Test the main entry point for calculation of the Authorization header content */
+  AwsAuthV4 util(api, now, signPayload, awsAccessKeyId, strlen(awsAccessKeyId), awsSecretAccessKey, strlen(awsSecretAccessKey),
+                 awsService, strlen(awsService), includedHeaders, excludedHeaders, defaultDefaultRegionMap);
+
+  /* Test the canonicalization of the request */
+  String signedHeaders;
+  String canonicalReq = getCanonicalRequestSha256Hash(api, signPayload, includedHeaders, excludedHeaders, signedHeaders);
+  CHECK_FALSE(signedHeaders.compare(bench[0]));
+  CHECK_FALSE(canonicalReq.compare(bench[1]));
+}
+
+TEST_CASE("S3AuthV4UtilParams: include all headers by default", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include = defaultIncludeHeaders;
+  StringSet exclude = defaultExcludeHeaders;
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;headerb;headerc;headerd;headere;headerf;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "819a275bbd601fd6f6ba39190ee8299d34fcb0f5e0a4c0d8017c35e79a026579",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: include all headers explicit", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include;
+  commaSeparateString<StringSet>(include, "HeaderA,HeaderB,HeaderC,HeaderD,HeaderE,HeaderF");
+  StringSet exclude = defaultExcludeHeaders;
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;headerb;headerc;headerd;headere;headerf;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "819a275bbd601fd6f6ba39190ee8299d34fcb0f5e0a4c0d8017c35e79a026579",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: exclude all headers explicit", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include = defaultIncludeHeaders;
+  StringSet exclude;
+  commaSeparateString<StringSet>(exclude, "HeaderA,HeaderB,HeaderC,HeaderD,HeaderE,HeaderF");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "ef3088997c69bc860e0bb36f97a8335f38863339e7fd01f2cd17b5391da575fb",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: include/exclude non overlapping headers", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include, exclude;
+  commaSeparateString<StringSet>(include, "HeaderA,HeaderB,HeaderC");
+  commaSeparateString<StringSet>(exclude, "HeaderD,HeaderE,HeaderF");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;headerb;headerc;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "c1c7fb808eefdb712192efeed168fdecef0f8d95e8df5a2569d127068c425209",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: include/exclude overlapping headers", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include, exclude;
+  commaSeparateString<StringSet>(include, "HeaderA,HeaderB,HeaderC");
+  commaSeparateString<StringSet>(exclude, "HeaderC,HeaderD,HeaderE,HeaderF");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;headerb;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "0ac0bd67e304b3c25ec51f01b86c824f7439cdb0a5bc16acdebab73f34e12a57",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: include/exclude overlapping headers missing include", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include, exclude;
+  commaSeparateString<StringSet>(include, "HeaderA,HeaderC");
+  commaSeparateString<StringSet>(exclude, "HeaderC,HeaderD,HeaderE,HeaderF");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "5b5bef63c923fed685230feb91d8059fe8d56c80d21ba6922ee335ff3fcc45bf",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+TEST_CASE("S3AuthV4UtilParams: include/exclude overlapping headers missing exclude", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+  api._headers["HeaderA"]              = "HeaderAValue";
+  api._headers["HeaderB"]              = "HeaderBValue";
+  api._headers["HeaderC"]              = "HeaderCValue";
+  api._headers["HeaderD"]              = "HeaderDValue";
+  api._headers["HeaderE"]              = "HeaderEValue";
+  api._headers["HeaderF"]              = "HeaderFValue";
+
+  StringSet include, exclude;
+  commaSeparateString<StringSet>(include, "HeaderA,HeaderB,HeaderC");
+  commaSeparateString<StringSet>(exclude, "HeaderC,HeaderD,HeaderF");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;headera;headerb;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "0ac0bd67e304b3c25ec51f01b86c824f7439cdb0a5bc16acdebab73f34e12a57",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+/*
+ * Mandatory headers Host, x-amz-* and Content-Type must be included even if the user asked to exclude them.
+ */
+TEST_CASE("S3AuthV4UtilParams: include content type", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["Content-Type"]         = "gzip";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  StringSet include = defaultIncludeHeaders;
+  StringSet exclude;
+  commaSeparateString<StringSet>(exclude, "Content-Type,x-amz-content-sha256,x-amz-date");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "content-type;host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "ef3088997c69bc860e0bb36f97a8335f38863339e7fd01f2cd17b5391da575fb",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
+
+/*
+ * Mandatory headers Host, x-amz-* and Content-Type must be included even if the user asked to exclude them.
+ * Content-Type should not be signed if it is missing from the HTTP request.
+ */
+TEST_CASE("S3AuthV4UtilParams: include missing content type", "[AWS][auth][utility]")
+{
+  time_t now = 1369353600; /* 5/24/2013 00:00:00 GMT */
+
+  /* Define the HTTP request elements */
+  MockTsInterface api;
+  api._method.assign("GET");
+  api._host.assign("examplebucket.s3.amazonaws.com");
+  api._path.assign("");
+  api._query.assign("max-keys=2&prefix=J");
+  api._headers["Host"]                 = "examplebucket.s3.amazonaws.com";
+  api._headers["x-amz-content-sha256"] = "UNSIGNED-PAYLOAD";
+  api._headers["x-amz-date"]           = "20130524T000000Z";
+
+  StringSet include = defaultIncludeHeaders;
+  StringSet exclude;
+  commaSeparateString<StringSet>(exclude, "Content-Type,x-amz-content-sha256,x-amz-date");
+
+  const char *bench[] = {
+    /* Signed Headers */
+    "host;x-amz-content-sha256;x-amz-date",
+    /* Canonical Request sha256 */
+    "528623330c85041d6fb82795b6f8d5771825d3568b9f0bc1faa8a49e1f5f9cfc",
+  };
+
+  ValidateBenchCanonicalRequest(api, /* signPayload */ false, &now, bench, include, exclude);
+}
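
Note on the signing flow exercised by the tests above: they walk the standard AWS Signature Version 4 chain (canonical request hash, string to sign, signing-key derivation, hex-encoded HMAC-SHA256 signature). Below is a minimal, self-contained sketch of the key-derivation step that getSignature() is expected to perform, using OpenSSL's HMAC() directly; the helper names hmacSha256Str, hexEncode and deriveSigV4Signature are illustrative only and are not part of this patch.

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string>

/* HMAC-SHA256 of msg keyed with key, returned as a raw byte string. */
static std::string
hmacSha256Str(const std::string &key, const std::string &msg)
{
  unsigned char md[EVP_MAX_MD_SIZE];
  unsigned int mdLen = 0;
  HMAC(EVP_sha256(), key.data(), static_cast<int>(key.size()),
       reinterpret_cast<const unsigned char *>(msg.data()), msg.size(), md, &mdLen);
  return std::string(reinterpret_cast<char *>(md), mdLen);
}

/* Lower-case hex encoding, equivalent in spirit to the plugin's base16Encode(). */
static std::string
hexEncode(const std::string &in)
{
  static const char digits[] = "0123456789abcdef";
  std::string out;
  for (unsigned char c : in) {
    out.push_back(digits[c >> 4]);
    out.push_back(digits[c & 0xf]);
  }
  return out;
}

/* date is the 8-character "YYYYMMDD" scope component, e.g. "20130524". */
static std::string
deriveSigV4Signature(const std::string &secret, const std::string &date, const std::string &region,
                     const std::string &service, const std::string &stringToSign)
{
  std::string kDate    = hmacSha256Str("AWS4" + secret, date);
  std::string kRegion  = hmacSha256Str(kDate, region);
  std::string kService = hmacSha256Str(kRegion, service);
  std::string kSigning = hmacSha256Str(kService, "aws4_request");
  return hexEncode(hmacSha256Str(kSigning, stringToSign));
}

With the GET Object bench above (date 20130524, region us-east-1, service s3) and the matching string to sign, this derivation should reproduce the benchmark signature f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41.
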
diff --git a/plugins/s3_auth/unit-tests/test_aws_auth_v4.h b/plugins/s3_auth/unit-tests/test_aws_auth_v4.h
new file mode 100644
index 0000000..b46274d
--- /dev/null
+++ b/plugins/s3_auth/unit-tests/test_aws_auth_v4.h
@@ -0,0 +1,146 @@
+/*
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+/**
+ * @file test_aws_auth_v4.h
+ * @brief TS API mock and a mock header iterator used for unit testing.
+ * @see test_aws_auth_v4.cc
+ */
+
+#ifndef PLUGINS_S3_AUTH_UNIT_TESTS_TEST_AWS_AUTH_V4_H_
+#define PLUGINS_S3_AUTH_UNIT_TESTS_TEST_AWS_AUTH_V4_H_
+
+#include <string> /* std::string */
+
+/* Define a header iterator to be used in unit tests */
+class HeaderIterator
+{
+public:
+  HeaderIterator(const StringMap::iterator &it) { _it = it; }
+  HeaderIterator(const HeaderIterator &i) { _it = i._it; }
+  ~HeaderIterator() {}
+  HeaderIterator &
+  operator=(HeaderIterator &i)
+  {
+    _it = i._it;
+    return *this;
+  }
+  HeaderIterator &operator++()
+  {
+    _it++;
+    return *this;
+  }
+  HeaderIterator operator++(int)
+  {
+    HeaderIterator tmp(*this);
+    operator++();
+    return tmp;
+  }
+  bool
+  operator!=(const HeaderIterator &it)
+  {
+    return _it != it._it;
+  }
+  const char *
+  getName(int *len)
+  {
+    *len = _it->first.length();
+    return _it->first.c_str();
+  }
+  const char *
+  getValue(int *len)
+  {
+    *len = _it->second.length();
+    return _it->second.c_str();
+  }
+  StringMap::iterator _it;
+};
+
+/* Define a mock API to be used in unit-tests */
+class MockTsInterface : public TsInterface
+{
+public:
+  const char *
+  getMethod(int *length)
+  {
+    *length = _method.length();
+    return _method.c_str();
+  }
+  const char *
+  getHost(int *length)
+  {
+    *length = _host.length();
+    return _host.c_str();
+  }
+  const char *
+  getPath(int *length)
+  {
+    *length = _path.length();
+    return _path.c_str();
+  }
+  const char *
+  getQuery(int *length)
+  {
+    *length = _query.length();
+    return _query.c_str();
+  }
+  HeaderIterator
+  headerBegin()
+  {
+    return HeaderIterator(_headers.begin());
+  }
+  HeaderIterator
+  headerEnd()
+  {
+    return HeaderIterator(_headers.end());
+  }
+
+  String _method;
+  String _host;
+  String _path;
+  String _query;
+  StringMap _headers;
+};
+
+/* Expose the following methods only to the unit tests */
+String base16Encode(const char *in, size_t inLen);
+String uriEncode(const String &in, bool isObjectName = false);
+String uriDecode(const String &in);
+String lowercase(const char *in, size_t inLen);
+const char *trimWhiteSpaces(const char *in, size_t inLen, size_t &newLen);
+
+String getCanonicalRequestSha256Hash(TsInterface &api, bool signPayload, const StringSet &includeHeaders,
+                                     const StringSet &excludeHeaders, String &signedHeaders);
+String getStringToSign(TsInterface &api, const char *dateTime, size_t dateTimeLen, const char *canonicalRequestSha256Hash,
+                       size_t canonicalRequestSha256HashLen);
+String getStringToSign(const char *host, size_t hostLen, const char *dateTime, size_t dateTimeLen, const char *region,
+                       size_t regionLen, const char *service, size_t serviceLen, const char *canonicalRequestSha256Hash,
+                       size_t canonicalRequestSha256HashLen);
+String getRegion(const StringMap &regionMap, const char *host, size_t hostLen);
+size_t hmacSha256(const char *secret, size_t secretLen, const char *msg, size_t msgLen, char *hmac, size_t hmacLen);
+
+size_t getSignature(const char *awsSecret, size_t awsSecretLen, const char *awsRegion, size_t awsRegionLen, const char *awsService,
+                    size_t awsServiceLen, const char *dateTime, size_t dateTimeLen, const char *stringToSign,
+                    size_t stringToSignLen, char *base16Signature, size_t base16SignatureLen);
+size_t getIso8601Time(time_t *now, char *dateTime, size_t dateTimeLen);
+
+extern const StringMap defaultDefaultRegionMap;
+extern const StringSet defaultExcludeHeaders;
+extern const StringSet defaultIncludeHeaders;
+
+#endif /* PLUGINS_S3_AUTH_UNIT_TESTS_TEST_AWS_AUTH_V4_H_ */
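
For context, a small sketch (not part of this patch) of how a consumer such as getCanonicalRequestSha256Hash() can walk the mocked headers through the iterator above; dumpHeaders is an illustrative name, and it assumes the String typedef and the lowercase() helper declared for the plugin.

#include <iostream>

static void
dumpHeaders(MockTsInterface &api)
{
  /* Iterate the mock headers the way the signing code does, lower-casing names as SigV4 requires. */
  for (HeaderIterator it = api.headerBegin(); it != api.headerEnd(); it++) {
    int nameLen  = 0;
    int valueLen = 0;
    const char *name  = it.getName(&nameLen);
    const char *value = it.getValue(&valueLen);
    std::cout << lowercase(name, nameLen) << ": " << String(value, valueLen) << "\n";
  }
}

/* Usage, e.g. inside a Catch2 TEST_CASE:
 *   MockTsInterface api;
 *   api._headers["Host"]       = "examplebucket.s3.amazonaws.com";
 *   api._headers["x-amz-date"] = "20130524T000000Z";
 *   dumpHeaders(api);  // prints each header as "name: value"
 */
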
diff --git a/plugins/tcpinfo/tcpinfo.cc b/plugins/tcpinfo/tcpinfo.cc
index c1d2148..8d228bc 100644
--- a/plugins/tcpinfo/tcpinfo.cc
+++ b/plugins/tcpinfo/tcpinfo.cc
@@ -337,7 +337,7 @@ TSPluginInit(int argc, const char *argv[])
   for (;;) {
     unsigned long lval;
 
-    switch (getopt_long(argc, (char *const *)argv, "r:f:l:h:e:H:S:M:", longopts, NULL)) {
+    switch (getopt_long(argc, (char *const *)argv, "r:f:l:h:e:H:S:M:", longopts, nullptr)) {
     case 'r':
       if (parse_unsigned(optarg, lval)) {
         config->sample = atoi(optarg);
diff --git a/proxy/Crash.cc b/proxy/Crash.cc
index b99abde..366d5fd 100644
--- a/proxy/Crash.cc
+++ b/proxy/Crash.cc
@@ -40,7 +40,7 @@ static char *
 create_logger_path()
 {
   RecString name;
-  ats_scoped_str bindir;
+  std::string bindir;
   ats_scoped_str fullpath;
 
   if (RecGetRecordString_Xmalloc("proxy.config.crash_log_helper", &name) != REC_ERR_OKAY) {
diff --git a/proxy/InkAPI.cc b/proxy/InkAPI.cc
index b4614b0..a710a5e 100644
--- a/proxy/InkAPI.cc
+++ b/proxy/InkAPI.cc
@@ -216,6 +216,7 @@ tsapi const char *TS_MIME_FIELD_WARNING;
 tsapi const char *TS_MIME_FIELD_WWW_AUTHENTICATE;
 tsapi const char *TS_MIME_FIELD_XREF;
 tsapi const char *TS_MIME_FIELD_X_FORWARDED_FOR;
+tsapi const char *TS_MIME_FIELD_FORWARDED;
 
 /* MIME fields string lengths */
 tsapi int TS_MIME_LEN_ACCEPT;
@@ -290,6 +291,7 @@ tsapi int TS_MIME_LEN_WARNING;
 tsapi int TS_MIME_LEN_WWW_AUTHENTICATE;
 tsapi int TS_MIME_LEN_XREF;
 tsapi int TS_MIME_LEN_X_FORWARDED_FOR;
+tsapi int TS_MIME_LEN_FORWARDED;
 
 /* HTTP miscellaneous values */
 tsapi const char *TS_HTTP_VALUE_BYTES;
@@ -1500,6 +1502,7 @@ api_init()
     TS_MIME_FIELD_WWW_AUTHENTICATE          = MIME_FIELD_WWW_AUTHENTICATE;
     TS_MIME_FIELD_XREF                      = MIME_FIELD_XREF;
     TS_MIME_FIELD_X_FORWARDED_FOR           = MIME_FIELD_X_FORWARDED_FOR;
+    TS_MIME_FIELD_FORWARDED                 = MIME_FIELD_FORWARDED;
 
     TS_MIME_LEN_ACCEPT                    = MIME_LEN_ACCEPT;
     TS_MIME_LEN_ACCEPT_CHARSET            = MIME_LEN_ACCEPT_CHARSET;
@@ -1573,6 +1576,7 @@ api_init()
     TS_MIME_LEN_WWW_AUTHENTICATE          = MIME_LEN_WWW_AUTHENTICATE;
     TS_MIME_LEN_XREF                      = MIME_LEN_XREF;
     TS_MIME_LEN_X_FORWARDED_FOR           = MIME_LEN_X_FORWARDED_FOR;
+    TS_MIME_LEN_FORWARDED                 = MIME_LEN_FORWARDED;
 
     /* HTTP methods */
     TS_HTTP_METHOD_CONNECT = HTTP_METHOD_CONNECT;
@@ -1769,21 +1773,22 @@ TShrtime()
 const char *
 TSInstallDirGet(void)
 {
-  return Layout::get()->prefix;
+  static std::string prefix = Layout::get()->prefix;
+  return prefix.c_str();
 }
 
 const char *
 TSConfigDirGet(void)
 {
-  static const char *sysconfdir = RecConfigReadConfigDir();
-  return sysconfdir;
+  static std::string sysconfdir = RecConfigReadConfigDir();
+  return sysconfdir.c_str();
 }
 
 const char *
 TSRuntimeDirGet(void)
 {
-  static const char *runtimedir = RecConfigReadRuntimeDir();
-  return runtimedir;
+  static std::string runtimedir = RecConfigReadRuntimeDir();
+  return runtimedir.c_str();
 }
 
 const char *
@@ -1811,9 +1816,8 @@ TSTrafficServerVersionGetPatch()
 const char *
 TSPluginDirGet(void)
 {
-  static const char *path = RecConfigReadPluginDir();
-
-  return path;
+  static std::string path = RecConfigReadPluginDir();
+  return path.c_str();
 }
 
 ////////////////////////////////////////////////////////////////////
@@ -7644,10 +7648,21 @@ TSHttpTxnServerPush(TSHttpTxn txnp, const char *url, int url_len)
     url_obj.destroy();
     return;
   }
+
   HttpSM *sm          = reinterpret_cast<HttpSM *>(txnp);
   Http2Stream *stream = dynamic_cast<Http2Stream *>(sm->ua_session);
   if (stream) {
-    stream->push_promise(url_obj);
+    Http2ClientSession *parent = static_cast<Http2ClientSession *>(stream->get_parent());
+    if (!parent->is_url_pushed(url, url_len)) {
+      HTTPHdr *hptr = &(sm->t_state.hdr_info.client_request);
+      TSMLoc obj    = reinterpret_cast<TSMLoc>(hptr->m_http);
+
+      MIMEHdrImpl *mh = _hdr_mloc_to_mime_hdr_impl(obj);
+      MIMEField *f    = mime_hdr_field_find(mh, MIME_FIELD_ACCEPT_ENCODING, MIME_LEN_ACCEPT_ENCODING);
+      stream->push_promise(url_obj, f);
+
+      parent->add_url_to_pushed_table(url, url_len);
+    }
   }
   url_obj.destroy();
 }
@@ -7830,6 +7845,9 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr
   case TS_CONFIG_HTTP_INSERT_SQUID_X_FORWARDED_FOR:
     ret = _memberp_to_generic(&overridableHttpConfig->insert_squid_x_forwarded_for, typep);
     break;
+  case TS_CONFIG_HTTP_INSERT_FORWARDED:
+    ret = _memberp_to_generic(&overridableHttpConfig->insert_forwarded, typep);
+    break;
   case TS_CONFIG_HTTP_SERVER_TCP_INIT_CWND:
     ret = _memberp_to_generic(&overridableHttpConfig->server_tcp_init_cwnd, typep);
     break;
@@ -7962,8 +7980,8 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr
   case TS_CONFIG_HTTP_CACHE_RANGE_LOOKUP:
     ret = _memberp_to_generic(&overridableHttpConfig->cache_range_lookup, typep);
     break;
-  case TS_CONFIG_HTTP_NORMALIZE_AE_GZIP:
-    ret = _memberp_to_generic(&overridableHttpConfig->normalize_ae_gzip, typep);
+  case TS_CONFIG_HTTP_NORMALIZE_AE:
+    ret = _memberp_to_generic(&overridableHttpConfig->normalize_ae, typep);
     break;
   case TS_CONFIG_HTTP_DEFAULT_BUFFER_SIZE:
     ret = _memberp_to_generic(&overridableHttpConfig->default_buffer_size_index, typep);
@@ -8254,6 +8272,17 @@ TSHttpTxnConfigStringSet(TSHttpTxn txnp, TSOverridableConfigKey conf, const char
       s->t_state.txn_conf->client_cert_filepath = const_cast<char *>(value);
     }
     break;
+  case TS_CONFIG_HTTP_INSERT_FORWARDED:
+    if (value && length > 0) {
+      ts::LocalBufferWriter<1024> error;
+      HttpForwarded::OptionBitSet bs = HttpForwarded::optStrToBitset(ts::string_view(value, length), error);
+      if (!error.size()) {
+        s->t_state.txn_conf->insert_forwarded = bs;
+      } else {
+        Error("HTTP %.*s", static_cast<int>(error.size()), error.data());
+      }
+    }
+    break;
   default:
     return TS_ERROR;
     break;
@@ -8324,6 +8353,12 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf,
     }
     break;
 
+  case 30:
+    if (!strncmp(name, "proxy.config.http.normalize_ae", length)) {
+      cnf = TS_CONFIG_HTTP_NORMALIZE_AE;
+    }
+    break;
+
   case 31:
     if (!strncmp(name, "proxy.config.http.chunking.size", length)) {
       cnf = TS_CONFIG_HTTP_CHUNKING_SIZE;
@@ -8344,21 +8379,15 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf,
       cnf = TS_CONFIG_HTTP_CACHE_GENERATION;
     } else if (!strncmp(name, "proxy.config.http.insert_client_ip", length)) {
       cnf = TS_CONFIG_HTTP_ANONYMIZE_INSERT_CLIENT_IP;
+    } else if (!strncmp(name, "proxy.config.http.insert_forwarded", length)) {
+      cnf = TS_CONFIG_HTTP_INSERT_FORWARDED;
+      typ = TS_RECORDDATATYPE_STRING;
     }
     break;
 
   case 35:
-    switch (name[length - 1]) {
-    case 'e':
-      if (!strncmp(name, "proxy.config.http.cache.range.write", length)) {
-        cnf = TS_CONFIG_HTTP_CACHE_RANGE_WRITE;
-      }
-      break;
-    case 'p':
-      if (!strncmp(name, "proxy.config.http.normalize_ae_gzip", length)) {
-        cnf = TS_CONFIG_HTTP_NORMALIZE_AE_GZIP;
-      }
-      break;
+    if (!strncmp(name, "proxy.config.http.cache.range.write", length)) {
+      cnf = TS_CONFIG_HTTP_CACHE_RANGE_WRITE;
     }
     break;
 
@@ -9287,7 +9316,7 @@ TSClientRequestUuidGet(TSHttpTxn txnp, char *uuid_str)
   const char *machine = (char *)Machine::instance()->uuid.getString();
   int len;
 
-  len = snprintf(uuid_str, TS_CRUUID_STRING_LEN, "%s-%" PRId64 "", machine, sm->sm_id);
+  len = snprintf(uuid_str, TS_CRUUID_STRING_LEN + 1, "%s-%" PRId64 "", machine, sm->sm_id);
   if (len > TS_CRUUID_STRING_LEN) {
     return TS_ERROR;
   }
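
A note on the snprintf() sizing change in TSClientRequestUuidGet() above: snprintf's size argument counts the terminating NUL, so holding a TS_CRUUID_STRING_LEN-character UUID string without truncation requires a size of TS_CRUUID_STRING_LEN + 1. A minimal, self-contained sketch of the behavior, using an illustrative MAX_LEN in place of the real constant:

#include <cstdio>
#include <cstring>

int
main()
{
  constexpr int MAX_LEN = 5;
  char buf[MAX_LEN + 1];

  /* With size == MAX_LEN only MAX_LEN - 1 characters fit before the NUL: buf becomes "abcd". */
  int n = std::snprintf(buf, MAX_LEN, "%s", "abcde");

  /* With size == MAX_LEN + 1 the full 5-character string fits; n is the intended length. */
  n = std::snprintf(buf, MAX_LEN + 1, "%s", "abcde");

  return (n == MAX_LEN && std::strcmp(buf, "abcde") == 0) ? 0 : 1;
}
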
diff --git a/proxy/InkAPITest.cc b/proxy/InkAPITest.cc
index 6ed87d3..bc97c62 100644
--- a/proxy/InkAPITest.cc
+++ b/proxy/InkAPITest.cc
@@ -1622,12 +1622,13 @@ REGRESSION_TEST(SDK_API_TSMutexCreate)(RegressionTest *test, int /* atype ATS_UN
   TSMutexLock(mutexp);
 
   /* This is normal because all locking is from the same thread */
-  TSReturnCode lock = TS_ERROR;
+  TSReturnCode lock1 = TS_ERROR;
+  TSReturnCode lock2 = TS_ERROR;
 
-  TSMutexLockTry(mutexp);
-  lock = TSMutexLockTry(mutexp);
+  lock1 = TSMutexLockTry(mutexp);
+  lock2 = TSMutexLockTry(mutexp);
 
-  if (TS_SUCCESS == lock) {
+  if (TS_SUCCESS == lock1 && TS_SUCCESS == lock2) {
     SDK_RPRINT(test, "TSMutexCreate", "TestCase1", TC_PASS, "ok");
     SDK_RPRINT(test, "TSMutexLock", "TestCase1", TC_PASS, "ok");
     SDK_RPRINT(test, "TSMutexLockTry", "TestCase1", TC_PASS, "ok");
@@ -7476,120 +7477,119 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectServerIntercept)(RegressionTest *
 ////////////////////////////////////////////////
 
 // The order of these should be the same as TSOverridableConfigKey
-const char *SDK_Overridable_Configs[TS_CONFIG_LAST_ENTRY] = {
-  "proxy.config.url_remap.pristine_host_hdr",
-  "proxy.config.http.chunking_enabled",
-  "proxy.config.http.negative_caching_enabled",
-  "proxy.config.http.negative_caching_lifetime",
-  "proxy.config.http.cache.when_to_revalidate",
-  "proxy.config.http.keep_alive_enabled_in",
-  "proxy.config.http.keep_alive_enabled_out",
-  "proxy.config.http.keep_alive_post_out",
-  "proxy.config.http.server_session_sharing.match",
-  "proxy.config.net.sock_recv_buffer_size_out",
-  "proxy.config.net.sock_send_buffer_size_out",
-  "proxy.config.net.sock_option_flag_out",
-  "proxy.config.http.forward.proxy_auth_to_parent",
-  "proxy.config.http.anonymize_remove_from",
-  "proxy.config.http.anonymize_remove_referer",
-  "proxy.config.http.anonymize_remove_user_agent",
-  "proxy.config.http.anonymize_remove_cookie",
-  "proxy.config.http.anonymize_remove_client_ip",
-  "proxy.config.http.insert_client_ip",
-  "proxy.config.http.response_server_enabled",
-  "proxy.config.http.insert_squid_x_forwarded_for",
-  "proxy.config.http.server_tcp_init_cwnd",
-  "proxy.config.http.send_http11_requests",
-  "proxy.config.http.cache.http",
-  "proxy.config.http.cache.ignore_client_no_cache",
-  "proxy.config.http.cache.ignore_client_cc_max_age",
-  "proxy.config.http.cache.ims_on_client_no_cache",
-  "proxy.config.http.cache.ignore_server_no_cache",
-  "proxy.config.http.cache.cache_responses_to_cookies",
-  "proxy.config.http.cache.ignore_authentication",
-  "proxy.config.http.cache.cache_urls_that_look_dynamic",
-  "proxy.config.http.cache.required_headers",
-  "proxy.config.http.insert_request_via_str",
-  "proxy.config.http.insert_response_via_str",
-  "proxy.config.http.cache.heuristic_min_lifetime",
-  "proxy.config.http.cache.heuristic_max_lifetime",
-  "proxy.config.http.cache.guaranteed_min_lifetime",
-  "proxy.config.http.cache.guaranteed_max_lifetime",
-  "proxy.config.http.cache.max_stale_age",
-  "proxy.config.http.keep_alive_no_activity_timeout_in",
-  "proxy.config.http.keep_alive_no_activity_timeout_out",
-  "proxy.config.http.transaction_no_activity_timeout_in",
-  "proxy.config.http.transaction_no_activity_timeout_out",
-  "proxy.config.http.transaction_active_timeout_out",
-  "proxy.config.http.origin_max_connections",
-  "proxy.config.http.connect_attempts_max_retries",
-  "proxy.config.http.connect_attempts_max_retries_dead_server",
-  "proxy.config.http.connect_attempts_rr_retries",
-  "proxy.config.http.connect_attempts_timeout",
-  "proxy.config.http.post_connect_attempts_timeout",
-  "proxy.config.http.down_server.cache_time",
-  "proxy.config.http.down_server.abort_threshold",
-  "proxy.config.http.doc_in_cache_skip_dns",
-  "proxy.config.http.background_fill_active_timeout",
-  "proxy.config.http.response_server_str",
-  "proxy.config.http.cache.heuristic_lm_factor",
-  "proxy.config.http.background_fill_completed_threshold",
-  "proxy.config.net.sock_packet_mark_out",
-  "proxy.config.net.sock_packet_tos_out",
-  "proxy.config.http.insert_age_in_response",
-  "proxy.config.http.chunking.size",
-  "proxy.config.http.flow_control.enabled",
-  "proxy.config.http.flow_control.low_water",
-  "proxy.config.http.flow_control.high_water",
-  "proxy.config.http.cache.range.lookup",
-  "proxy.config.http.normalize_ae_gzip",
-  "proxy.config.http.default_buffer_size",
-  "proxy.config.http.default_buffer_water_mark",
-  "proxy.config.http.request_header_max_size",
-  "proxy.config.http.response_header_max_size",
-  "proxy.config.http.negative_revalidating_enabled",
-  "proxy.config.http.negative_revalidating_lifetime",
-  "proxy.config.ssl.hsts_max_age",
-  "proxy.config.ssl.hsts_include_subdomains",
-  "proxy.config.http.cache.open_read_retry_time",
-  "proxy.config.http.cache.max_open_read_retries",
-  "proxy.config.http.cache.range.write",
-  "proxy.config.http.post.check.content_length.enabled",
-  "proxy.config.http.global_user_agent_header",
-  "proxy.config.http.auth_server_session_private",
-  "proxy.config.http.slow.log.threshold",
-  "proxy.config.http.cache.generation",
-  "proxy.config.body_factory.template_base",
-  "proxy.config.http.cache.open_write_fail_action",
-  "proxy.config.http.number_of_redirections",
-  "proxy.config.http.cache.max_open_write_retries",
-  "proxy.config.http.redirect_use_orig_cache_key",
-  "proxy.config.http.attach_server_session_to_client",
-  "proxy.config.http.origin_max_connections_queue",
-  "proxy.config.websocket.no_activity_timeout",
-  "proxy.config.websocket.active_timeout",
-  "proxy.config.http.uncacheable_requests_bypass_parent",
-  "proxy.config.http.parent_proxy.total_connect_attempts",
-  "proxy.config.http.transaction_active_timeout_in",
-  "proxy.config.srv_enabled",
-  "proxy.config.http.forward_connect_method",
-  "proxy.config.ssl.client.cert.filename",
-  "proxy.config.ssl.client.cert.path",
-  "proxy.config.http.parent_proxy.mark_down_hostdb",
-  "proxy.config.ssl.client.verify.server",
-  "proxy.config.http.cache.enable_default_vary_headers",
-  "proxy.config.http.cache.vary_default_text",
-  "proxy.config.http.cache.vary_default_images",
-  "proxy.config.http.cache.vary_default_other",
-  "proxy.config.http.cache.ignore_accept_mismatch",
-  "proxy.config.http.cache.ignore_accept_language_mismatch",
-  "proxy.config.http.cache.ignore_accept_encoding_mismatch",
-  "proxy.config.http.cache.ignore_accept_charset_mismatch",
-  "proxy.config.http.parent_proxy.fail_threshold",
-  "proxy.config.http.parent_proxy.retry_time",
-  "proxy.config.http.parent_proxy.per_parent_connect_attempts",
-  "proxy.config.http.parent_proxy.connect_attempts_timeout",
-};
+const char *SDK_Overridable_Configs[TS_CONFIG_LAST_ENTRY] = {"proxy.config.url_remap.pristine_host_hdr",
+                                                             "proxy.config.http.chunking_enabled",
+                                                             "proxy.config.http.negative_caching_enabled",
+                                                             "proxy.config.http.negative_caching_lifetime",
+                                                             "proxy.config.http.cache.when_to_revalidate",
+                                                             "proxy.config.http.keep_alive_enabled_in",
+                                                             "proxy.config.http.keep_alive_enabled_out",
+                                                             "proxy.config.http.keep_alive_post_out",
+                                                             "proxy.config.http.server_session_sharing.match",
+                                                             "proxy.config.net.sock_recv_buffer_size_out",
+                                                             "proxy.config.net.sock_send_buffer_size_out",
+                                                             "proxy.config.net.sock_option_flag_out",
+                                                             "proxy.config.http.forward.proxy_auth_to_parent",
+                                                             "proxy.config.http.anonymize_remove_from",
+                                                             "proxy.config.http.anonymize_remove_referer",
+                                                             "proxy.config.http.anonymize_remove_user_agent",
+                                                             "proxy.config.http.anonymize_remove_cookie",
+                                                             "proxy.config.http.anonymize_remove_client_ip",
+                                                             "proxy.config.http.insert_client_ip",
+                                                             "proxy.config.http.response_server_enabled",
+                                                             "proxy.config.http.insert_squid_x_forwarded_for",
+                                                             "proxy.config.http.server_tcp_init_cwnd",
+                                                             "proxy.config.http.send_http11_requests",
+                                                             "proxy.config.http.cache.http",
+                                                             "proxy.config.http.cache.ignore_client_no_cache",
+                                                             "proxy.config.http.cache.ignore_client_cc_max_age",
+                                                             "proxy.config.http.cache.ims_on_client_no_cache",
+                                                             "proxy.config.http.cache.ignore_server_no_cache",
+                                                             "proxy.config.http.cache.cache_responses_to_cookies",
+                                                             "proxy.config.http.cache.ignore_authentication",
+                                                             "proxy.config.http.cache.cache_urls_that_look_dynamic",
+                                                             "proxy.config.http.cache.required_headers",
+                                                             "proxy.config.http.insert_request_via_str",
+                                                             "proxy.config.http.insert_response_via_str",
+                                                             "proxy.config.http.cache.heuristic_min_lifetime",
+                                                             "proxy.config.http.cache.heuristic_max_lifetime",
+                                                             "proxy.config.http.cache.guaranteed_min_lifetime",
+                                                             "proxy.config.http.cache.guaranteed_max_lifetime",
+                                                             "proxy.config.http.cache.max_stale_age",
+                                                             "proxy.config.http.keep_alive_no_activity_timeout_in",
+                                                             "proxy.config.http.keep_alive_no_activity_timeout_out",
+                                                             "proxy.config.http.transaction_no_activity_timeout_in",
+                                                             "proxy.config.http.transaction_no_activity_timeout_out",
+                                                             "proxy.config.http.transaction_active_timeout_out",
+                                                             "proxy.config.http.origin_max_connections",
+                                                             "proxy.config.http.connect_attempts_max_retries",
+                                                             "proxy.config.http.connect_attempts_max_retries_dead_server",
+                                                             "proxy.config.http.connect_attempts_rr_retries",
+                                                             "proxy.config.http.connect_attempts_timeout",
+                                                             "proxy.config.http.post_connect_attempts_timeout",
+                                                             "proxy.config.http.down_server.cache_time",
+                                                             "proxy.config.http.down_server.abort_threshold",
+                                                             "proxy.config.http.doc_in_cache_skip_dns",
+                                                             "proxy.config.http.background_fill_active_timeout",
+                                                             "proxy.config.http.response_server_str",
+                                                             "proxy.config.http.cache.heuristic_lm_factor",
+                                                             "proxy.config.http.background_fill_completed_threshold",
+                                                             "proxy.config.net.sock_packet_mark_out",
+                                                             "proxy.config.net.sock_packet_tos_out",
+                                                             "proxy.config.http.insert_age_in_response",
+                                                             "proxy.config.http.chunking.size",
+                                                             "proxy.config.http.flow_control.enabled",
+                                                             "proxy.config.http.flow_control.low_water",
+                                                             "proxy.config.http.flow_control.high_water",
+                                                             "proxy.config.http.cache.range.lookup",
+                                                             "proxy.config.http.default_buffer_size",
+                                                             "proxy.config.http.default_buffer_water_mark",
+                                                             "proxy.config.http.request_header_max_size",
+                                                             "proxy.config.http.response_header_max_size",
+                                                             "proxy.config.http.negative_revalidating_enabled",
+                                                             "proxy.config.http.negative_revalidating_lifetime",
+                                                             "proxy.config.ssl.hsts_max_age",
+                                                             "proxy.config.ssl.hsts_include_subdomains",
+                                                             "proxy.config.http.cache.open_read_retry_time",
+                                                             "proxy.config.http.cache.max_open_read_retries",
+                                                             "proxy.config.http.cache.range.write",
+                                                             "proxy.config.http.post.check.content_length.enabled",
+                                                             "proxy.config.http.global_user_agent_header",
+                                                             "proxy.config.http.auth_server_session_private",
+                                                             "proxy.config.http.slow.log.threshold",
+                                                             "proxy.config.http.cache.generation",
+                                                             "proxy.config.body_factory.template_base",
+                                                             "proxy.config.http.cache.open_write_fail_action",
+                                                             "proxy.config.http.number_of_redirections",
+                                                             "proxy.config.http.cache.max_open_write_retries",
+                                                             "proxy.config.http.redirect_use_orig_cache_key",
+                                                             "proxy.config.http.attach_server_session_to_client",
+                                                             "proxy.config.http.origin_max_connections_queue",
+                                                             "proxy.config.websocket.no_activity_timeout",
+                                                             "proxy.config.websocket.active_timeout",
+                                                             "proxy.config.http.uncacheable_requests_bypass_parent",
+                                                             "proxy.config.http.parent_proxy.total_connect_attempts",
+                                                             "proxy.config.http.transaction_active_timeout_in",
+                                                             "proxy.config.srv_enabled",
+                                                             "proxy.config.http.forward_connect_method",
+                                                             "proxy.config.ssl.client.cert.filename",
+                                                             "proxy.config.ssl.client.cert.path",
+                                                             "proxy.config.http.parent_proxy.mark_down_hostdb",
+                                                             "proxy.config.ssl.client.verify.server",
+                                                             "proxy.config.http.cache.enable_default_vary_headers",
+                                                             "proxy.config.http.cache.vary_default_text",
+                                                             "proxy.config.http.cache.vary_default_images",
+                                                             "proxy.config.http.cache.vary_default_other",
+                                                             "proxy.config.http.cache.ignore_accept_mismatch",
+                                                             "proxy.config.http.cache.ignore_accept_language_mismatch",
+                                                             "proxy.config.http.cache.ignore_accept_encoding_mismatch",
+                                                             "proxy.config.http.cache.ignore_accept_charset_mismatch",
+                                                             "proxy.config.http.parent_proxy.fail_threshold",
+                                                             "proxy.config.http.parent_proxy.retry_time",
+                                                             "proxy.config.http.parent_proxy.per_parent_connect_attempts",
+                                                             "proxy.config.http.parent_proxy.connect_attempts_timeout",
+                                                             "proxy.config.http.normalize_ae",
+                                                             "proxy.config.http.insert_forwarded"};
 
 REGRESSION_TEST(SDK_API_OVERRIDABLE_CONFIGS)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
diff --git a/proxy/InkAPITestTool.cc b/proxy/InkAPITestTool.cc
index dd814b1..fc3392a 100644
--- a/proxy/InkAPITestTool.cc
+++ b/proxy/InkAPITestTool.cc
@@ -667,7 +667,7 @@ synclient_txn_write_request(TSCont contp)
   while (ntodo > 0) {
     block     = TSIOBufferStart(txn->req_buffer);
     ptr_block = TSIOBufferBlockWriteStart(block, &avail);
-    towrite   = MIN(ntodo, avail);
+    towrite   = std::min(ntodo, avail);
     memcpy(ptr_block, txn->request + ndone, towrite);
     TSIOBufferProduce(txn->req_buffer, towrite);
     ntodo -= towrite;
@@ -962,7 +962,7 @@ synserver_txn_write_response(TSCont contp)
   while (ntodo > 0) {
     block     = TSIOBufferStart(txn->resp_buffer);
     ptr_block = TSIOBufferBlockWriteStart(block, &avail);
-    towrite   = MIN(ntodo, avail);
+    towrite   = std::min(ntodo, avail);
     memcpy(ptr_block, response + ndone, towrite);
     TSIOBufferProduce(txn->resp_buffer, towrite);
     ntodo -= towrite;
diff --git a/proxy/Main.cc b/proxy/Main.cc
index 3557805..12f83df 100644
--- a/proxy/Main.cc
+++ b/proxy/Main.cc
@@ -37,6 +37,7 @@
 #include "ts/ink_stack_trace.h"
 #include "ts/ink_syslog.h"
 #include "ts/hugepages.h"
+#include "ts/runroot.cc"
 
 #include "api/ts/ts.h" // This is sadly needed because of us using TSThreadInit() for some reason.
 
@@ -214,6 +215,7 @@ static ArgumentDescription argument_descriptions[] = {
   {"poll_timeout", 't', "poll timeout in milliseconds", "I", &poll_timeout, nullptr, nullptr},
   HELP_ARGUMENT_DESCRIPTION(),
   VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION(),
 };
 
 struct AutoStopCont : public Continuation {
@@ -268,8 +270,8 @@ public:
       Debug("log", "received SIGUSR2, reloading traffic.out");
 
       // reload output logfile (file is usually called traffic.out)
-      diags->set_stdout_output(bind_stdout);
-      diags->set_stderr_output(bind_stderr);
+      diags->set_std_output(StdStream::STDOUT, bind_stdout);
+      diags->set_std_output(StdStream::STDERR, bind_stderr);
     }
 
     if (signal_received[SIGTERM] || signal_received[SIGINT]) {
@@ -382,8 +384,11 @@ public:
   {
     memset(&_usage, 0, sizeof(_usage));
     SET_HANDLER(&MemoryLimit::periodic);
+    RecRegisterStatInt(RECT_PROCESS, "proxy.process.traffic_server.memory.rss", static_cast<RecInt>(0), RECP_NON_PERSISTENT);
   }
+
   ~MemoryLimit() override { mutex = nullptr; }
+
   int
   periodic(int event, Event *e)
   {
@@ -393,14 +398,15 @@ public:
       delete this;
       return EVENT_DONE;
     }
-    if (_memory_limit == 0) {
-      // first time it has been run
-      _memory_limit = REC_ConfigReadInteger("proxy.config.memory.max_usage");
-      _memory_limit = _memory_limit >> 10; // divide by 1024
-    }
-    if (_memory_limit > 0) {
-      if (getrusage(RUSAGE_SELF, &_usage) == 0) {
-        Debug("server", "memory usage - ru_maxrss: %ld memory limit: %" PRId64, _usage.ru_maxrss, _memory_limit);
+
+    // "reload" the setting, we don't do this often so not expensive
+    _memory_limit = REC_ConfigReadInteger("proxy.config.memory.max_usage");
+    _memory_limit = _memory_limit >> 10; // divide by 1024
+
+    if (getrusage(RUSAGE_SELF, &_usage) == 0) {
+      RecSetRecordInt("proxy.process.traffic_server.memory.rss", _usage.ru_maxrss << 10, REC_SOURCE_DEFAULT); // * 1024
+      Debug("server", "memory usage - ru_maxrss: %ld memory limit: %" PRId64, _usage.ru_maxrss, _memory_limit);
+      if (_memory_limit > 0) {
         if (_usage.ru_maxrss > _memory_limit) {
           if (net_memory_throttle == false) {
             net_memory_throttle = true;
@@ -412,13 +418,13 @@ public:
             Debug("server", "memory usage under limit - ru_maxrss: %ld memory limit: %" PRId64, _usage.ru_maxrss, _memory_limit);
           }
         }
+      } else {
+        // this feature has not been enabled
+        Debug("server", "limiting connections based on memory usage has been disabled");
+        e->cancel();
+        delete this;
+        return EVENT_DONE;
       }
-    } else {
-      // this feature has not be enabled
-      Debug("server", "limiting connections based on memory usage has been disabled");
-      e->cancel();
-      delete this;
-      return EVENT_DONE;
     }
     return EVENT_CONT;
   }
@@ -511,19 +517,19 @@ init_system()
 static void
 check_lockfile()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str lockfile;
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string lockfile;
   pid_t holding_pid;
   int err;
 
   lockfile = Layout::relative_to(rundir, SERVER_LOCK);
 
-  Lockfile server_lockfile(lockfile);
+  Lockfile server_lockfile(lockfile.c_str());
   err = server_lockfile.Get(&holding_pid);
 
   if (err != 1) {
     char *reason = strerror(-err);
-    fprintf(stderr, "WARNING: Can't acquire lockfile '%s'", (const char *)lockfile);
+    fprintf(stderr, "WARNING: Can't acquire lockfile '%s'", lockfile.c_str());
 
     if ((err == 0) && (holding_pid != -1)) {
       fprintf(stderr, " (Lock file held by process ID %ld)\n", (long)holding_pid);
@@ -541,17 +547,17 @@ check_lockfile()
 static void
 check_config_directories()
 {
-  ats_scoped_str rundir(RecConfigReadRuntimeDir());
-  ats_scoped_str sysconfdir(RecConfigReadConfigDir());
+  std::string rundir(RecConfigReadRuntimeDir());
+  std::string sysconfdir(RecConfigReadConfigDir());
 
-  if (access(sysconfdir, R_OK) == -1) {
-    fprintf(stderr, "unable to access() config dir '%s': %d, %s\n", (const char *)sysconfdir, errno, strerror(errno));
+  if (access(sysconfdir.c_str(), R_OK) == -1) {
+    fprintf(stderr, "unable to access() config dir '%s': %d, %s\n", sysconfdir.c_str(), errno, strerror(errno));
     fprintf(stderr, "please set the 'TS_ROOT' environment variable\n");
     ::exit(1);
   }
 
-  if (access(rundir, R_OK | W_OK) == -1) {
-    fprintf(stderr, "unable to access() local state dir '%s': %d, %s\n", (const char *)rundir, errno, strerror(errno));
+  if (access(rundir.c_str(), R_OK | W_OK) == -1) {
+    fprintf(stderr, "unable to access() local state dir '%s': %d, %s\n", rundir.c_str(), errno, strerror(errno));
     fprintf(stderr, "please set 'proxy.config.local_state_dir'\n");
     ::exit(1);
   }
@@ -770,12 +776,12 @@ cmd_clear(char *cmd)
   bool c_cache = !strcmp(cmd, "clear_cache");
 
   if (c_all || c_hdb) {
-    ats_scoped_str rundir(RecConfigReadRuntimeDir());
-    ats_scoped_str config(Layout::relative_to(rundir, "hostdb.config"));
+    std::string rundir(RecConfigReadRuntimeDir());
+    std::string config(Layout::relative_to(rundir, "hostdb.config"));
 
     Note("Clearing HostDB Configuration");
-    if (unlink(config) < 0) {
-      Note("unable to unlink %s", (const char *)config);
+    if (unlink(config.c_str()) < 0) {
+      Note("unable to unlink %s", config.c_str());
     }
   }
 
@@ -1358,15 +1364,15 @@ run_RegressionTest()
 static void
 chdir_root()
 {
-  const char *prefix = Layout::get()->prefix;
+  std::string prefix = Layout::get()->prefix;
 
-  if (chdir(prefix) < 0) {
-    fprintf(stderr, "%s: unable to change to root directory \"%s\" [%d '%s']\n", appVersionInfo.AppStr, prefix, errno,
+  if (chdir(prefix.c_str()) < 0) {
+    fprintf(stderr, "%s: unable to change to root directory \"%s\" [%d '%s']\n", appVersionInfo.AppStr, prefix.c_str(), errno,
             strerror(errno));
     fprintf(stderr, "%s: please correct the path or set the TS_ROOT environment variable\n", appVersionInfo.AppStr);
     ::exit(1);
   } else {
-    printf("%s: using root directory '%s'\n", appVersionInfo.AppStr, prefix);
+    printf("%s: using root directory '%s'\n", appVersionInfo.AppStr, prefix.c_str());
   }
 }
 
@@ -1524,6 +1530,7 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   // Define the version info
   appVersionInfo.setup(PACKAGE_NAME, "traffic_server", PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, "");
 
+  runroot_handler(argv);
   // Before accessing file system initialize Layout engine
   Layout::create();
   chdir_root(); // change directory to the install root of traffic server.
@@ -1564,8 +1571,8 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   // related errors and if diagsConfig isn't get up yet that will crash on a NULL pointer.
   diagsConfig = new DiagsConfig("Server", DIAGS_LOG_FILENAME, error_tags, action_tags, false);
   diags       = diagsConfig->diags;
-  diags->set_stdout_output(bind_stdout);
-  diags->set_stderr_output(bind_stderr);
+  diags->set_std_output(StdStream::STDOUT, bind_stdout);
+  diags->set_std_output(StdStream::STDERR, bind_stderr);
   if (is_debug_tag_set("diags")) {
     diags->dump();
   }
@@ -1651,8 +1658,8 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   diagsConfig          = new DiagsConfig("Server", DIAGS_LOG_FILENAME, error_tags, action_tags, true);
   diags                = diagsConfig->diags;
   RecSetDiags(diags);
-  diags->set_stdout_output(bind_stdout);
-  diags->set_stderr_output(bind_stderr);
+  diags->set_std_output(StdStream::STDOUT, bind_stdout);
+  diags->set_std_output(StdStream::STDERR, bind_stderr);
   if (is_debug_tag_set("diags")) {
     diags->dump();
   }
@@ -1783,7 +1790,7 @@ main(int /* argc ATS_UNUSED */, const char **argv)
 
   eventProcessor.schedule_every(new SignalContinuation, HRTIME_MSECOND * 500, ET_CALL);
   eventProcessor.schedule_every(new DiagsLogContinuation, HRTIME_SECOND, ET_TASK);
-  eventProcessor.schedule_every(new MemoryLimit, HRTIME_SECOND, ET_TASK);
+  eventProcessor.schedule_every(new MemoryLimit, HRTIME_SECOND * 10, ET_TASK);
   REC_RegisterConfigUpdateFunc("proxy.config.dump_mem_info_frequency", init_memory_tracker, nullptr);
   init_memory_tracker(nullptr, RECD_NULL, RecData(), nullptr);
 
diff --git a/proxy/ParentSelection.cc b/proxy/ParentSelection.cc
index 5f19141..2d64d24 100644
--- a/proxy/ParentSelection.cc
+++ b/proxy/ParentSelection.cc
@@ -29,6 +29,7 @@
 #include "ProxyConfig.h"
 #include "HTTP.h"
 #include "HttpTransact.h"
+#include "I_Machine.h"
 
 #define MAX_SIMPLE_RETRIES 5
 #define MAX_UNAVAILABLE_SERVER_RETRIES 5
@@ -333,6 +334,44 @@ UnavailableServerResponseCodes::UnavailableServerResponseCodes(char *val)
   std::sort(codes.begin(), codes.end());
 }
 
+void
+ParentRecord::PreProcessParents(const char *val, const int line_num, char *buf, size_t len)
+{
+  char *_val                      = static_cast<char *>(ats_strndup(val, strlen(val)));
+  char fqdn[TS_MAX_HOST_NAME_LEN] = {0}, *nm, *token, *savePtr;
+  std::string str;
+  Machine *machine = Machine::instance();
+
+  strncpy(_val, val, strlen(val));
+
+  token = strtok_r(_val, ";", &savePtr);
+  while (token != nullptr) {
+    if ((nm = strchr(token, ':')) != nullptr) {
+      size_t len = (nm - token);
+      ink_assert(len < sizeof(fqdn));
+      memset(fqdn, 0, sizeof(fqdn));
+      strncpy(fqdn, token, len);
+      if (machine->is_self(fqdn)) {
+        Debug("parent_select", "token: %s, matches this machine.  Removing self from parent list at line %d", fqdn, line_num);
+        token = strtok_r(nullptr, ";", &savePtr);
+        continue;
+      }
+    } else {
+      if (machine->is_self(token)) {
+        Debug("parent_select", "token: %s, matches this machine.  Removing self from parent list at line %d", token, line_num);
+        token = strtok_r(nullptr, ";", &savePtr);
+        continue;
+      }
+    }
+
+    str += token;
+    str += ";";
+    token = strtok_r(nullptr, ";", &savePtr);
+  }
+  strncpy(buf, str.c_str(), len);
+  ats_free(_val);
+}
+
 // const char* ParentRecord::ProcessParents(char* val, bool isPrimary)
 //
 //   Reads in the value of a "round-robin" or "order"
@@ -524,6 +563,7 @@ ParentRecord::Init(matcher_line *line_info)
   const char *tmp;
   char *label;
   char *val;
+  char parent_buf[16384] = {0};
   bool used              = false;
   ParentRR_t round_robin = P_NO_ROUND_ROBIN;
   char buf[128];
@@ -557,10 +597,12 @@ ParentRecord::Init(matcher_line *line_info)
       }
       used = true;
     } else if (strcasecmp(label, "parent") == 0 || strcasecmp(label, "primary_parent") == 0) {
-      errPtr = ProcessParents(val, true);
+      PreProcessParents(val, line_num, parent_buf, sizeof(parent_buf) - 1);
+      errPtr = ProcessParents(parent_buf, true);
       used   = true;
     } else if (strcasecmp(label, "secondary_parent") == 0) {
-      errPtr = ProcessParents(val, false);
+      PreProcessParents(val, line_num, parent_buf, sizeof(parent_buf) - 1);
+      errPtr = ProcessParents(parent_buf, false);
       used   = true;
     } else if (strcasecmp(label, "go_direct") == 0) {
       if (strcasecmp(val, "false") == 0) {
diff --git a/proxy/ParentSelection.h b/proxy/ParentSelection.h
index 62ef8db..57d842e 100644
--- a/proxy/ParentSelection.h
+++ b/proxy/ParentSelection.h
@@ -134,6 +134,7 @@ public:
 
   const char *scheme = nullptr;
   // private:
+  void PreProcessParents(const char *val, const int line_num, char *buf, size_t len);
   const char *ProcessParents(char *val, bool isPrimary);
   bool ignore_query                                                  = false;
   volatile uint32_t rr_next                                          = 0;
diff --git a/proxy/Plugin.cc b/proxy/Plugin.cc
index 240574b..587ec4d 100644
--- a/proxy/Plugin.cc
+++ b/proxy/Plugin.cc
@@ -231,7 +231,7 @@ plugin_init(bool validateOnly)
 
   if (INIT_ONCE) {
     api_init();
-    plugin_dir = RecConfigReadPluginDir();
+    plugin_dir = ats_stringdup(RecConfigReadPluginDir());
     INIT_ONCE  = false;
   }
 
@@ -299,8 +299,12 @@ plugin_init(bool validateOnly)
         argv[i] = vars[i];
       }
     }
-    argv[argc] = nullptr;
 
+    if (argc < MAX_PLUGIN_ARGS) {
+      argv[argc] = nullptr;
+    } else {
+      argv[MAX_PLUGIN_ARGS - 1] = nullptr;
+    }
     retVal = plugin_load(argc, argv, validateOnly);
 
     for (i = 0; i < argc; i++) {
diff --git a/proxy/PluginVC.cc b/proxy/PluginVC.cc
index 7a377e7..e2e2217 100644
--- a/proxy/PluginVC.cc
+++ b/proxy/PluginVC.cc
@@ -432,7 +432,7 @@ PluginVC::transfer_bytes(MIOBuffer *transfer_to, IOBufferReader *transfer_from,
 
   while (act_on > 0) {
     int64_t block_read_avail = transfer_from->block_read_avail();
-    int64_t to_move          = MIN(act_on, block_read_avail);
+    int64_t to_move          = std::min(act_on, block_read_avail);
     int64_t moved            = 0;
 
     if (to_move <= 0) {
@@ -507,7 +507,7 @@ PluginVC::process_write_side(bool other_side_call)
 
   IOBufferReader *reader = write_state.vio.get_reader();
   int64_t bytes_avail    = reader->read_avail();
-  int64_t act_on         = MIN(bytes_avail, ntodo);
+  int64_t act_on         = std::min(bytes_avail, ntodo);
 
   Debug("pvc", "[%u] %s: process_write_side; act_on %" PRId64 "", core_obj->id, PVC_TYPE, act_on);
 
@@ -532,7 +532,7 @@ PluginVC::process_write_side(bool other_side_call)
     Debug("pvc", "[%u] %s: process_write_side no buffer space", core_obj->id, PVC_TYPE);
     return;
   }
-  act_on = MIN(act_on, buf_space);
+  act_on = std::min(act_on, buf_space);
 
   int64_t added = transfer_bytes(core_buffer, reader, act_on);
   if (added < 0) {
@@ -625,7 +625,7 @@ PluginVC::process_read_side(bool other_side_call)
   }
 
   int64_t bytes_avail = core_reader->read_avail();
-  int64_t act_on      = MIN(bytes_avail, ntodo);
+  int64_t act_on      = std::min(bytes_avail, ntodo);
 
   Debug("pvc", "[%u] %s: process_read_side; act_on %" PRId64 "", core_obj->id, PVC_TYPE, act_on);
 
@@ -641,13 +641,13 @@ PluginVC::process_read_side(bool other_side_call)
   MIOBuffer *output_buffer = read_state.vio.get_writer();
 
   int64_t water_mark = output_buffer->water_mark;
-  water_mark         = MAX(water_mark, PVC_DEFAULT_MAX_BYTES);
+  water_mark         = std::max(water_mark, static_cast<int64_t>(PVC_DEFAULT_MAX_BYTES));
   int64_t buf_space  = water_mark - output_buffer->max_read_avail();
   if (buf_space <= 0) {
     Debug("pvc", "[%u] %s: process_read_side no buffer space", core_obj->id, PVC_TYPE);
     return;
   }
-  act_on = MIN(act_on, buf_space);
+  act_on = std::min(act_on, buf_space);
 
   int64_t added = transfer_bytes(output_buffer, core_reader, act_on);
   if (added <= 0) {
diff --git a/proxy/ProxyClientTransaction.h b/proxy/ProxyClientTransaction.h
index b797237..daf8b71 100644
--- a/proxy/ProxyClientTransaction.h
+++ b/proxy/ProxyClientTransaction.h
@@ -251,6 +251,11 @@ public:
     return parent ? parent->protocol_contains(tag_prefix) : nullptr;
   }
 
+  // This function must return a non-negative number that is different for two in-progress transactions with the same parent
+  // session.
+  //
+  virtual int get_transaction_id() const = 0;
+
 protected:
   ProxyClientSession *parent;
   HttpSM *current_reader;
diff --git a/proxy/StatPages.cc b/proxy/StatPages.cc
index b48946f..db22090 100644
--- a/proxy/StatPages.cc
+++ b/proxy/StatPages.cc
@@ -49,17 +49,20 @@ static volatile int n_stat_pages = 0;
 void
 StatPagesManager::init()
 {
+  ink_mutex_init(&stat_pages_mutex);
   REC_EstablishStaticConfigInt32(m_enabled, "proxy.config.http_ui_enabled");
 }
 
 void
 StatPagesManager::register_http(const char *module, StatPagesFunc func)
 {
+  ink_mutex_acquire(&stat_pages_mutex);
   ink_release_assert(n_stat_pages < MAX_STAT_PAGES);
 
   stat_pages[n_stat_pages].module = (char *)ats_malloc(strlen(module) + 3);
   snprintf(stat_pages[n_stat_pages].module, strlen(module) + 3, "{%s}", module);
   stat_pages[n_stat_pages++].func = func;
+  ink_mutex_release(&stat_pages_mutex);
 }
 
 Action *
diff --git a/proxy/StatPages.h b/proxy/StatPages.h
index ba78ec7..3235b65 100644
--- a/proxy/StatPages.h
+++ b/proxy/StatPages.h
@@ -84,6 +84,7 @@ struct StatPagesManager {
   bool is_stat_page(URL *url);
   bool is_cache_inspector_page(URL *url);
   int m_enabled;
+  ink_mutex stat_pages_mutex;
 };
 
 inkcoreapi extern StatPagesManager statPagesManager;
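
Registration sketch for the now mutex-protected StatPages path (the module name and callback are hypothetical; the callback must match the existing StatPagesFunc signature):

    statPagesManager.register_http("my_module", my_stat_pages_callback);

init() creates stat_pages_mutex and register_http() acquires it, so concurrent registrations are safe.
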
diff --git a/proxy/config/records.config.default.in b/proxy/config/records.config.default.in
index 94d7226..27ff4ac 100644
--- a/proxy/config/records.config.default.in
+++ b/proxy/config/records.config.default.in
@@ -97,7 +97,7 @@ CONFIG proxy.config.http.cache.http INT 1
 #    https://docs.trafficserver.apache.org/en/latest/admin-guide/files/cache.config.en.html
 ##############################################################################
 CONFIG proxy.config.http.cache.ignore_client_cc_max_age INT 1
-CONFIG proxy.config.http.normalize_ae_gzip INT 1
+CONFIG proxy.config.http.normalize_ae INT 1
 CONFIG proxy.config.http.cache.cache_responses_to_cookies INT 1
 CONFIG proxy.config.http.cache.cache_urls_that_look_dynamic INT 1
     # https://docs.trafficserver.apache.org/records.config#proxy-config-http-cache-when-to-revalidate
diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc
index a6746a1..d6be78d 100644
--- a/proxy/hdrs/HTTP.cc
+++ b/proxy/hdrs/HTTP.cc
@@ -1544,6 +1544,7 @@ HTTPHdr::_fill_target_cache() const
 {
   URL *url = this->url_get();
   const char *port_ptr;
+  int port_len;
 
   m_target_in_url  = false;
   m_port_in_header = false;
@@ -1555,10 +1556,10 @@ HTTPHdr::_fill_target_cache() const
     m_port_in_header = 0 != url->port_get_raw();
     m_host_mime      = nullptr;
   } else if (nullptr !=
-             (m_host_mime = const_cast<HTTPHdr *>(this)->get_host_port_values(nullptr, &m_host_length, &port_ptr, nullptr))) {
+             (m_host_mime = const_cast<HTTPHdr *>(this)->get_host_port_values(nullptr, &m_host_length, &port_ptr, &port_len))) {
     m_port = 0;
     if (port_ptr) {
-      for (; is_digit(*port_ptr); ++port_ptr) {
+      for (; port_len > 0 && is_digit(*port_ptr); ++port_ptr, --port_len) {
         m_port = m_port * 10 + *port_ptr - '0';
       }
     }
@@ -1808,6 +1809,8 @@ ClassAllocator<HTTPCacheAlt> httpCacheAltAllocator("httpCacheAltAllocator");
 
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
+int constexpr HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS;
+
 HTTPCacheAlt::HTTPCacheAlt()
   : m_magic(CACHE_ALT_MAGIC_ALIVE),
     m_writeable(1),
diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h
index 00ad4e6..4c599fd 100644
--- a/proxy/hdrs/HTTP.h
+++ b/proxy/hdrs/HTTP.h
@@ -1338,7 +1338,7 @@ struct HTTPCacheAlt {
   /// for the last fragment.
   FragOffset *m_frag_offsets;
   /// # of fragment offsets built in to object.
-  static int const N_INTEGRAL_FRAG_OFFSETS = 4;
+  static int constexpr N_INTEGRAL_FRAG_OFFSETS = 4;
   /// Integral fragment offset table.
   FragOffset m_integral_frag_offsets[N_INTEGRAL_FRAG_OFFSETS];
 
diff --git a/proxy/hdrs/HdrToken.cc b/proxy/hdrs/HdrToken.cc
index 8cb1f3b..6f8064c 100644
--- a/proxy/hdrs/HdrToken.cc
+++ b/proxy/hdrs/HdrToken.cc
@@ -108,7 +108,9 @@ static const char *_hdrtoken_strs[] = {
 
   // Header extensions
   "X-ID", "X-Forwarded-For", "TE", "Strict-Transport-Security", "100-continue",
-};
+
+  // RFC 7239
+  "Forwarded"};
 
 static HdrTokenTypeBinding _hdrtoken_strs_type_initializers[] = {
   {"file", HDRTOKEN_TYPE_SCHEME},
@@ -233,6 +235,7 @@ static HdrTokenFieldInfo _hdrtoken_strs_field_initializers[] = {
   {"Xref", MIME_SLOTID_NONE, MIME_PRESENCE_XREF, HTIF_NONE},
   {"X-ID", MIME_SLOTID_NONE, MIME_PRESENCE_NONE, (HTIF_COMMAS | HTIF_MULTVALS | HTIF_HOPBYHOP)},
   {"X-Forwarded-For", MIME_SLOTID_NONE, MIME_PRESENCE_NONE, (HTIF_COMMAS | HTIF_MULTVALS)},
+  {"Forwarded", MIME_SLOTID_NONE, MIME_PRESENCE_NONE, (HTIF_COMMAS | HTIF_MULTVALS)},
   {"Sec-WebSocket-Key", MIME_SLOTID_NONE, MIME_PRESENCE_NONE, HTIF_NONE},
   {"Sec-WebSocket-Version", MIME_SLOTID_NONE, MIME_PRESENCE_NONE, HTIF_NONE},
   {nullptr, 0, 0, 0},
@@ -291,6 +294,9 @@ hdrtoken_hash(const unsigned char *string, unsigned int length)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
+// WARNING:  Indexes into this array are stored on disk for cached objects.  New strings must be added at the end of the array to
+// avoid changing the indexes of pre-existing entries, unless the cache format version number is increased.
+//
 static const char *_hdrtoken_commonly_tokenized_strs[] = {
   // MIME Field names
   "Accept-Charset", "Accept-Encoding", "Accept-Language", "Accept-Ranges", "Accept", "Age", "Allow",
@@ -352,7 +358,9 @@ static const char *_hdrtoken_commonly_tokenized_strs[] = {
 
   // Header extensions
   "X-ID", "X-Forwarded-For", "TE", "Strict-Transport-Security", "100-continue",
-};
+
+  // RFC 7239
+  "Forwarded"};
 
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
diff --git a/proxy/hdrs/HttpCompat.cc b/proxy/hdrs/HttpCompat.cc
index 6f69758..89eaa63 100644
--- a/proxy/hdrs/HttpCompat.cc
+++ b/proxy/hdrs/HttpCompat.cc
@@ -851,7 +851,7 @@ HttpCompat::determine_set_by_language(RawHashTable *table_of_sets, StrList *acpt
       // index, preferring values earlier in Accept-Language list.   //
       /////////////////////////////////////////////////////////////////
 
-      Q = min(Ql, Qc);
+      Q = std::min(Ql, Qc);
 
       //////////////////////////////////////////////////////////
       // normally the Q for default pages should be slightly  //
diff --git a/proxy/hdrs/MIME.cc b/proxy/hdrs/MIME.cc
index 418c34a..18de0f6 100644
--- a/proxy/hdrs/MIME.cc
+++ b/proxy/hdrs/MIME.cc
@@ -148,6 +148,7 @@ const char *MIME_FIELD_XREF;
 const char *MIME_FIELD_ATS_INTERNAL;
 const char *MIME_FIELD_X_ID;
 const char *MIME_FIELD_X_FORWARDED_FOR;
+const char *MIME_FIELD_FORWARDED;
 const char *MIME_FIELD_SEC_WEBSOCKET_KEY;
 const char *MIME_FIELD_SEC_WEBSOCKET_VERSION;
 const char *MIME_FIELD_HTTP2_SETTINGS;
@@ -263,6 +264,7 @@ int MIME_LEN_XREF;
 int MIME_LEN_ATS_INTERNAL;
 int MIME_LEN_X_ID;
 int MIME_LEN_X_FORWARDED_FOR;
+int MIME_LEN_FORWARDED;
 int MIME_LEN_SEC_WEBSOCKET_KEY;
 int MIME_LEN_SEC_WEBSOCKET_VERSION;
 int MIME_LEN_HTTP2_SETTINGS;
@@ -341,6 +343,7 @@ int MIME_WKSIDX_XREF;
 int MIME_WKSIDX_ATS_INTERNAL;
 int MIME_WKSIDX_X_ID;
 int MIME_WKSIDX_X_FORWARDED_FOR;
+int MIME_WKSIDX_FORWARDED;
 int MIME_WKSIDX_SEC_WEBSOCKET_KEY;
 int MIME_WKSIDX_SEC_WEBSOCKET_VERSION;
 int MIME_WKSIDX_HTTP2_SETTINGS;
@@ -733,6 +736,7 @@ mime_init()
     MIME_FIELD_ATS_INTERNAL              = hdrtoken_string_to_wks("@Ats-Internal");
     MIME_FIELD_X_ID                      = hdrtoken_string_to_wks("X-ID");
     MIME_FIELD_X_FORWARDED_FOR           = hdrtoken_string_to_wks("X-Forwarded-For");
+    MIME_FIELD_FORWARDED                 = hdrtoken_string_to_wks("Forwarded");
 
     MIME_FIELD_SEC_WEBSOCKET_KEY     = hdrtoken_string_to_wks("Sec-WebSocket-Key");
     MIME_FIELD_SEC_WEBSOCKET_VERSION = hdrtoken_string_to_wks("Sec-WebSocket-Version");
@@ -813,6 +817,7 @@ mime_init()
     MIME_LEN_ATS_INTERNAL              = hdrtoken_wks_to_length(MIME_FIELD_ATS_INTERNAL);
     MIME_LEN_X_ID                      = hdrtoken_wks_to_length(MIME_FIELD_X_ID);
     MIME_LEN_X_FORWARDED_FOR           = hdrtoken_wks_to_length(MIME_FIELD_X_FORWARDED_FOR);
+    MIME_LEN_FORWARDED                 = hdrtoken_wks_to_length(MIME_FIELD_FORWARDED);
 
     MIME_LEN_SEC_WEBSOCKET_KEY     = hdrtoken_wks_to_length(MIME_FIELD_SEC_WEBSOCKET_KEY);
     MIME_LEN_SEC_WEBSOCKET_VERSION = hdrtoken_wks_to_length(MIME_FIELD_SEC_WEBSOCKET_VERSION);
@@ -892,6 +897,7 @@ mime_init()
     MIME_WKSIDX_XREF                      = hdrtoken_wks_to_index(MIME_FIELD_XREF);
     MIME_WKSIDX_X_ID                      = hdrtoken_wks_to_index(MIME_FIELD_X_ID);
     MIME_WKSIDX_X_FORWARDED_FOR           = hdrtoken_wks_to_index(MIME_FIELD_X_FORWARDED_FOR);
+    MIME_WKSIDX_FORWARDED                 = hdrtoken_wks_to_index(MIME_FIELD_FORWARDED);
     MIME_WKSIDX_SEC_WEBSOCKET_KEY         = hdrtoken_wks_to_index(MIME_FIELD_SEC_WEBSOCKET_KEY);
     MIME_WKSIDX_SEC_WEBSOCKET_VERSION     = hdrtoken_wks_to_index(MIME_FIELD_SEC_WEBSOCKET_VERSION);
     MIME_WKSIDX_HTTP2_SETTINGS            = hdrtoken_wks_to_index(MIME_FIELD_HTTP2_SETTINGS);
@@ -2802,7 +2808,7 @@ mime_mem_print(const char *src_d, int src_l, char *buf_start, int buf_length, in
     }
   }
 
-  copy_l = min(buf_length - *buf_index_inout, src_l);
+  copy_l = std::min(buf_length - *buf_index_inout, src_l);
   if (copy_l > 0) {
     memcpy(buf_start + *buf_index_inout, src_d, copy_l);
     *buf_index_inout += copy_l;
diff --git a/proxy/hdrs/MIME.h b/proxy/hdrs/MIME.h
index 7df16ec..207cb15 100644
--- a/proxy/hdrs/MIME.h
+++ b/proxy/hdrs/MIME.h
@@ -388,6 +388,7 @@ extern const char *MIME_FIELD_XREF;
 extern const char *MIME_FIELD_ATS_INTERNAL;
 extern const char *MIME_FIELD_X_ID;
 extern const char *MIME_FIELD_X_FORWARDED_FOR;
+extern const char *MIME_FIELD_FORWARDED;
 extern const char *MIME_FIELD_SEC_WEBSOCKET_KEY;
 extern const char *MIME_FIELD_SEC_WEBSOCKET_VERSION;
 extern const char *MIME_FIELD_HTTP2_SETTINGS;
@@ -491,6 +492,7 @@ extern int MIME_LEN_XREF;
 extern int MIME_LEN_ATS_INTERNAL;
 extern int MIME_LEN_X_ID;
 extern int MIME_LEN_X_FORWARDED_FOR;
+extern int MIME_LEN_FORWARDED;
 
 extern int MIME_LEN_BYTES;
 extern int MIME_LEN_CHUNKED;
diff --git a/proxy/hdrs/Makefile.am b/proxy/hdrs/Makefile.am
index d82aab9..c2503f3 100644
--- a/proxy/hdrs/Makefile.am
+++ b/proxy/hdrs/Makefile.am
@@ -71,7 +71,7 @@ test_mime_LDADD = -L. -lhdrs \
   $(top_builddir)/mgmt/libmgmt_p.la \
   $(top_builddir)/proxy/shared/libUglyLogStubs.a \
   @HWLOC_LIBS@ \
-  @LIBTCL@
+  @LIBTCL@ @LIBCAP@
 
 test_mime_SOURCES = test_mime.cc
 
diff --git a/proxy/http/ForwardedConfig.cc b/proxy/http/ForwardedConfig.cc
new file mode 100644
index 0000000..27ccb2f
--- /dev/null
+++ b/proxy/http/ForwardedConfig.cc
@@ -0,0 +1,189 @@
+/** @file
+
+  Configuration of Forwarded HTTP header option.
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+
+#include <bitset>
+#include <string>
+#include <cctype>
+
+#include <ts/string_view.h>
+#include <ts/MemView.h>
+
+#include <HttpConfig.h>
+
+namespace
+{
+class BadOptionsErrMsg
+{
+public:
+  // Construct with a reference to the string that will contain the error message.
+  //
+  BadOptionsErrMsg(ts::FixedBufferWriter &err) : _err(err), _count(0) {}
+
+  // Add a bad option.
+  //
+  void
+  add(ts::StringView badOpt)
+  {
+    if (_count == 0) {
+      _err << "\"Forwarded\" configuration: ";
+      _addQuoted(badOpt);
+      _count = 1;
+    } else if (_count == 1) {
+      _saveLast = badOpt;
+      _count    = 2;
+    } else {
+      _err << ", ";
+      _addQuoted(_saveLast);
+      _saveLast = badOpt;
+      ++_count;
+    }
+  }
+
+  // Returns true if an error was seen.
+  //
+  bool
+  done()
+  {
+    if (_count == 0) {
+      return false;
+    }
+
+    if (_count == 1) {
+      _err << " is a bad option.";
+
+    } else if (_count != 0) {
+      _err << " and ";
+      _addQuoted(_saveLast);
+      _err << " are bad options.";
+    }
+    return true;
+  }
+
+private:
+  void
+  _addQuoted(ts::StringView sv)
+  {
+    _err << '\"' << ts::string_view(sv.begin(), sv.size()) << '\"';
+  }
+
+  ts::FixedBufferWriter &_err;
+
+  ts::StringView _saveLast;
+
+  int _count;
+};
+
+// Compare a StringView to a nul-terminated string, converting the StringView to lower case and ignoring whitespace in it.
+//
+bool
+eqIgnoreCaseWs(ts::StringView sv, const char *target)
+{
+  const char *s = sv.begin();
+
+  std::size_t skip = 0;
+  std::size_t i    = 0;
+
+  while ((i + skip) < sv.size()) {
+    if (std::isspace(s[i + skip])) {
+      ++skip;
+    } else if (std::tolower(s[i + skip]) != target[i]) {
+      return false;
+    } else {
+      ++i;
+    }
+  }
+
+  return target[i] == '\0';
+}
+
+} // end anonymous namespace
+
+namespace HttpForwarded
+{
+OptionBitSet
+optStrToBitset(ts::string_view optConfigStr, ts::FixedBufferWriter &error)
+{
+  const ts::StringView Delimiters(":|");
+
+  OptionBitSet optBS;
+
+  // Convert to TS StringView to be able to use parsing members.
+  //
+  ts::StringView oCS(optConfigStr.data(), optConfigStr.size());
+
+  if (eqIgnoreCaseWs(oCS, "none")) {
+    return OptionBitSet();
+  }
+
+  BadOptionsErrMsg em(error);
+
+  do {
+    ts::StringView optStr = oCS.extractPrefix(Delimiters);
+
+    if (eqIgnoreCaseWs(optStr, "for")) {
+      optBS.set(FOR);
+
+    } else if (eqIgnoreCaseWs(optStr, "by=ip")) {
+      optBS.set(BY_IP);
+
+    } else if (eqIgnoreCaseWs(optStr, "by=unknown")) {
+      optBS.set(BY_UNKNOWN);
+
+    } else if (eqIgnoreCaseWs(optStr, "by=servername")) {
+      optBS.set(BY_SERVER_NAME);
+
+    } else if (eqIgnoreCaseWs(optStr, "by=uuid")) {
+      optBS.set(BY_UUID);
+
+    } else if (eqIgnoreCaseWs(optStr, "proto")) {
+      optBS.set(PROTO);
+
+    } else if (eqIgnoreCaseWs(optStr, "host")) {
+      optBS.set(HOST);
+
+    } else if (eqIgnoreCaseWs(optStr, "connection=compact")) {
+      optBS.set(CONNECTION_COMPACT);
+
+    } else if (eqIgnoreCaseWs(optStr, "connection=std")) {
+      optBS.set(CONNECTION_STD);
+
+    } else if (eqIgnoreCaseWs(optStr, "connection=standard")) {
+      optBS.set(CONNECTION_STD);
+
+    } else if (eqIgnoreCaseWs(optStr, "connection=full")) {
+      optBS.set(CONNECTION_FULL);
+
+    } else {
+      em.add(optStr);
+    }
+  } while (oCS);
+
+  if (em.done()) {
+    return OptionBitSet();
+  }
+
+  return optBS;
+
+} // end optStrToBitset()
+
+} // end namespace HttpForwarded
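
Configuration sketch for the new parser (the value shown is invented for illustration): optStrToBitset() splits on ':' or '|', compares case-insensitively and ignores embedded whitespace, so a records.config entry such as

    CONFIG proxy.config.http.insert_forwarded STRING for|by=uuid|proto|host

sets the FOR, BY_UUID, PROTO and HOST bits. The value "none", or any string containing an unrecognized option, yields an empty bitset; unrecognized options are also reported through the supplied error writer.
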
diff --git a/proxy/http/Http1ClientSession.cc b/proxy/http/Http1ClientSession.cc
index 14a9de0..05a6d72 100644
--- a/proxy/http/Http1ClientSession.cc
+++ b/proxy/http/Http1ClientSession.cc
@@ -451,7 +451,7 @@ Http1ClientSession::reenable(VIO *vio)
 void
 Http1ClientSession::release(ProxyClientTransaction *trans)
 {
-  ink_assert(read_state == HCS_ACTIVE_READER);
+  ink_assert(read_state == HCS_ACTIVE_READER || read_state == HCS_INIT);
 
   // Clean up the write VIO in case of inactivity timeout
   this->do_io_write(nullptr, 0, nullptr);
diff --git a/proxy/http/Http1ClientSession.h b/proxy/http/Http1ClientSession.h
index 829baf6..458e432 100644
--- a/proxy/http/Http1ClientSession.h
+++ b/proxy/http/Http1ClientSession.h
@@ -62,8 +62,8 @@ public:
   virtual void
   start()
   {
-    // Create a new transaction object and kick it off
-    this->new_transaction();
+    // Troll for data to get a new transaction
+    this->release(&trans);
   }
 
   void new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOBufferReader *reader, bool backdoor);
diff --git a/proxy/http/Http1ClientTransaction.h b/proxy/http/Http1ClientTransaction.h
index 7547f6b..fb23bc7 100644
--- a/proxy/http/Http1ClientTransaction.h
+++ b/proxy/http/Http1ClientTransaction.h
@@ -169,6 +169,16 @@ public:
   }
   void transaction_done() override;
 
+  int
+  get_transaction_id() const override
+  {
+    // For HTTP/1 there is only one ongoing transaction at a time per session/connection.  Therefore, the transaction count can be
+    // presumed not to increase during the lifetime of a transaction, so this function returns a consistent, unique transaction
+    // identifier.
+    //
+    return get_transact_count();
+  }
+
 protected:
   uint16_t outbound_port;
   IpAddr outbound_ip4;
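
A hypothetical HTTP/2 counterpart, for contrast: a stream id is non-negative and unique among concurrent transactions on one session, so it would satisfy the contract declared in ProxyClientTransaction (the accessor name below is assumed for illustration):

    int
    get_transaction_id() const override
    {
      return this->get_stream_id(); // HTTP/2 stream identifier, unique per session
    }
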
diff --git a/proxy/http/HttpConfig.cc b/proxy/http/HttpConfig.cc
index d9a9828..1ab8796 100644
--- a/proxy/http/HttpConfig.cc
+++ b/proxy/http/HttpConfig.cc
@@ -179,6 +179,33 @@ http_server_session_sharing_cb(const char *name, RecDataT dtype, RecData data, v
   return REC_ERR_OKAY;
 }
 
+static int
+http_insert_forwarded_cb(const char *name, RecDataT dtype, RecData data, void *cookie)
+{
+  bool valid_p        = false;
+  HttpConfigParams *c = static_cast<HttpConfigParams *>(cookie);
+
+  if (0 == strcasecmp("proxy.config.http.insert_forwarded", name)) {
+    if (RECD_STRING == dtype) {
+      ts::LocalBufferWriter<1024> error;
+      HttpForwarded::OptionBitSet bs = HttpForwarded::optStrToBitset(ts::string_view(data.rec_string), error);
+      if (!error.size()) {
+        c->oride.insert_forwarded = bs;
+        valid_p                   = true;
+      } else {
+        Error("HTTP %.*s", static_cast<int>(error.size()), error.data());
+      }
+    }
+  }
+
+  // Signal an update if valid value arrived.
+  if (valid_p) {
+    http_config_cb(name, dtype, data, cookie);
+  }
+
+  return REC_ERR_OKAY;
+}
+
 void
 register_stat_callbacks()
 {
@@ -938,6 +965,21 @@ HttpConfig::startup()
                         c.oride.server_session_sharing_match);
   http_config_enum_read("proxy.config.http.server_session_sharing.pool", SessionSharingPoolStrings, c.server_session_sharing_pool);
 
+  RecRegisterConfigUpdateCb("proxy.config.http.insert_forwarded", &http_insert_forwarded_cb, &c);
+  {
+    char str[512];
+
+    if (REC_ERR_OKAY == RecGetRecordString("proxy.config.http.insert_forwarded", str, sizeof(str))) {
+      ts::LocalBufferWriter<1024> error;
+      HttpForwarded::OptionBitSet bs = HttpForwarded::optStrToBitset(ts::string_view(str), error);
+      if (!error.size()) {
+        c.oride.insert_forwarded = bs;
+      } else {
+        Error("HTTP %.*s", static_cast<int>(error.size()), error.data());
+      }
+    }
+  }
+
   HttpEstablishStaticConfigByte(c.oride.auth_server_session_private, "proxy.config.http.auth_server_session_private");
 
   HttpEstablishStaticConfigByte(c.oride.keep_alive_post_out, "proxy.config.http.keep_alive_post_out");
@@ -1003,7 +1045,7 @@ HttpConfig::startup()
 
   HttpEstablishStaticConfigByte(c.oride.insert_age_in_response, "proxy.config.http.insert_age_in_response");
   HttpEstablishStaticConfigByte(c.enable_http_stats, "proxy.config.http.enable_http_stats");
-  HttpEstablishStaticConfigByte(c.oride.normalize_ae_gzip, "proxy.config.http.normalize_ae_gzip");
+  HttpEstablishStaticConfigByte(c.oride.normalize_ae, "proxy.config.http.normalize_ae");
 
   HttpEstablishStaticConfigLongLong(c.oride.cache_heuristic_min_lifetime, "proxy.config.http.cache.heuristic_min_lifetime");
   HttpEstablishStaticConfigLongLong(c.oride.cache_heuristic_max_lifetime, "proxy.config.http.cache.heuristic_max_lifetime");
@@ -1278,13 +1320,14 @@ HttpConfig::reconfigure()
   params->oride.proxy_response_server_enabled = m_master.oride.proxy_response_server_enabled;
 
   params->oride.insert_squid_x_forwarded_for = INT_TO_BOOL(m_master.oride.insert_squid_x_forwarded_for);
+  params->oride.insert_forwarded             = m_master.oride.insert_forwarded;
   params->oride.insert_age_in_response       = INT_TO_BOOL(m_master.oride.insert_age_in_response);
   params->enable_http_stats                  = INT_TO_BOOL(m_master.enable_http_stats);
-  params->oride.normalize_ae_gzip            = INT_TO_BOOL(m_master.oride.normalize_ae_gzip);
+  params->oride.normalize_ae                 = m_master.oride.normalize_ae;
 
   params->oride.cache_heuristic_min_lifetime = m_master.oride.cache_heuristic_min_lifetime;
   params->oride.cache_heuristic_max_lifetime = m_master.oride.cache_heuristic_max_lifetime;
-  params->oride.cache_heuristic_lm_factor    = min(max(m_master.oride.cache_heuristic_lm_factor, 0.0f), 1.0f);
+  params->oride.cache_heuristic_lm_factor    = std::min(std::max(m_master.oride.cache_heuristic_lm_factor, 0.0f), 1.0f);
 
   params->oride.cache_guaranteed_min_lifetime = m_master.oride.cache_guaranteed_min_lifetime;
   params->oride.cache_guaranteed_max_lifetime = m_master.oride.cache_guaranteed_max_lifetime;
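
Runtime sketch: because startup() above registers http_insert_forwarded_cb() with RecRegisterConfigUpdateCb(), the option string is re-validated on a live reload, e.g.

    traffic_ctl config set proxy.config.http.insert_forwarded 'for|by=ip|proto'
    traffic_ctl config reload

An invalid string is logged through Error() and leaves the previously accepted bitset in effect.
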
diff --git a/proxy/http/HttpConfig.h b/proxy/http/HttpConfig.h
index b621044..b4e1d81 100644
--- a/proxy/http/HttpConfig.h
+++ b/proxy/http/HttpConfig.h
@@ -36,6 +36,7 @@
 
 #include <stdlib.h>
 #include <stdio.h>
+#include <bitset>
 
 #ifdef HAVE_CTYPE_H
 #include <ctype.h>
@@ -44,6 +45,8 @@
 #include "ts/ink_platform.h"
 #include "ts/ink_inet.h"
 #include "ts/Regex.h"
+#include "ts/string_view.h"
+#include "ts/BufferWriter.h"
 #include "HttpProxyAPIEnums.h"
 #include "ProxyConfig.h"
 #include "P_RecProcess.h"
@@ -358,6 +361,34 @@ struct HttpConfigPortRange {
   }
 };
 
+namespace HttpForwarded
+{
+// Options for what parameters will be included in "Forwarded" field header.
+//
+enum Option {
+  FOR,
+  BY_IP,              // by=<numeric IP address>.
+  BY_UNKNOWN,         // by=unknown.
+  BY_SERVER_NAME,     // by=<configured server name>.
+  BY_UUID,            // Obfuscated value for by, by=_<UUID>.
+  PROTO,              // Basic protocol (http, https) of incoming message.
+  HOST,               // Host from URL before any remapping.
+  CONNECTION_COMPACT, // Same value as 'proto' parameter.
+  CONNECTION_STD,     // Verbose protocol from Via: field, with dashes instead of spaces.
+  CONNECTION_FULL,    // Ultra-verbose protocol from Via: field, with dashes instead of spaces.
+
+  NUM_OPTIONS // Number of options.
+};
+
+using OptionBitSet = std::bitset<NUM_OPTIONS>;
+
+// Converts a string specifier for Forwarded options to a bitset of options, and returns the result.  If there are errors, an error
+// message will be inserted into 'error'.
+//
+OptionBitSet optStrToBitset(ts::string_view optConfigStr, ts::FixedBufferWriter &error);
+
+} // end HttpForwarded namespace
+
 /////////////////////////////////////////////////////////////
 // This is a little helper class, used by the HttpConfigParams
 // and State (txn) structure. It allows for certain configs
@@ -388,6 +419,7 @@ struct OverridableHttpConfigParams {
       proxy_response_server_enabled(1),
       proxy_response_hsts_include_subdomains(0),
       insert_squid_x_forwarded_for(1),
+      insert_forwarded(HttpForwarded::OptionBitSet()),
       send_http11_requests(1),
       cache_http(1),
       cache_ignore_client_no_cache(1),
@@ -409,7 +441,7 @@ struct OverridableHttpConfigParams {
       insert_response_via_string(0),
       doc_in_cache_skip_dns(1),
       flow_control_enabled(0),
-      normalize_ae_gzip(0),
+      normalize_ae(0),
       srv_enabled(0),
       parent_failures_update_hostdb(0),
       cache_open_write_fail_action(0),
@@ -528,6 +560,11 @@ struct OverridableHttpConfigParams {
   /////////////////////
   MgmtByte insert_squid_x_forwarded_for;
 
+  ///////////////
+  // Forwarded //
+  ///////////////
+  HttpForwarded::OptionBitSet insert_forwarded;
+
   //////////////////////
   //  Version Hell    //
   //////////////////////
@@ -567,7 +604,7 @@ struct OverridableHttpConfigParams {
   ////////////////////////////////
   // Optimize gzip alternates   //
   ////////////////////////////////
-  MgmtByte normalize_ae_gzip;
+  MgmtByte normalize_ae;
 
   //////////////////////////
   // hostdb/dns variables //
diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
index d0cf37e..1d98f31 100644
--- a/proxy/http/HttpSM.cc
+++ b/proxy/http/HttpSM.cc
@@ -452,6 +452,11 @@ HttpSM::attach_client_session(ProxyClientTransaction *client_vc, IOBufferReader
     client_sec_protocol      = protocol ? protocol : "-";
     const char *cipher       = ssl_vc->getSSLCipherSuite();
     client_cipher_suite      = cipher ? cipher : "-";
+    if (!client_tcp_reused) {
+      // Copy along the TLS handshake timings
+      milestones[TS_MILESTONE_TLS_HANDSHAKE_START] = ssl_vc->sslHandshakeBeginTime;
+      milestones[TS_MILESTONE_TLS_HANDSHAKE_END]   = ssl_vc->sslHandshakeEndTime;
+    }
   }
   const char *protocol_str = client_vc->get_protocol_string();
   client_protocol          = protocol_str ? protocol_str : "-";
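
Plugin-side sketch for the two milestones copied above (this assumes TS_MILESTONE_TLS_HANDSHAKE_START/END are exposed through TSMilestonesType and that txnp is the transaction, e.g. inside a TS_HTTP_TXN_CLOSE_HOOK handler):

    TSHRTime tls_start = 0, tls_end = 0;
    TSHttpTxnMilestoneGet(txnp, TS_MILESTONE_TLS_HANDSHAKE_START, &tls_start);
    TSHttpTxnMilestoneGet(txnp, TS_MILESTONE_TLS_HANDSHAKE_END, &tls_end);
    TSDebug("tls-timing", "handshake took %" PRId64 " ns", (int64_t)(tls_end - tls_start));

On a reused TCP connection the values stay at their zero defaults, since the hunk only copies them when !client_tcp_reused.
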
diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc
index 83db45a..2c26aee 100644
--- a/proxy/http/HttpTransact.cc
+++ b/proxy/http/HttpTransact.cc
@@ -660,6 +660,12 @@ HttpTransact::StartRemapRequest(State *s)
     obj_describe(s->hdr_info.client_request.m_http, true);
   }
 
+  if (s->http_config_param->referer_filter_enabled) {
+    s->filter_mask = URL_REMAP_FILTER_REFERER;
+    if (s->http_config_param->referer_format_redirect)
+      s->filter_mask |= URL_REMAP_FILTER_REDIRECT_FMT;
+  }
+
   DebugTxn("http_trans", "END HttpTransact::StartRemapRequest");
   TRANSACT_RETURN(SM_ACTION_API_PRE_REMAP, HttpTransact::PerformRemap);
 }
@@ -704,7 +710,12 @@ HttpTransact::EndRemapRequest(State *s)
       error_body_type = "redirect#moved_temporarily";
       break;
     default:
-      Warning("Invalid status code for redirect '%d'. Building a response for a temporary redirect.", s->http_return_code);
+      if (HTTP_STATUS_NONE == s->http_return_code) {
+        s->http_return_code = HTTP_STATUS_MOVED_TEMPORARILY;
+        Warning("Changed status code from '0' to '%d'.", s->http_return_code);
+      } else {
+        Warning("Using invalid status code for redirect '%d'. Building a response for a temporary redirect.", s->http_return_code);
+      }
       error_body_type = "redirect#moved_temporarily";
     }
     build_error_response(s, s->http_return_code, "Redirect", error_body_type);
@@ -1072,21 +1083,6 @@ HttpTransact::ModifyRequest(State *s)
     }
   }
 
-  if (s->txn_conf->normalize_ae_gzip) {
-    // if enabled, force Accept-Encoding header to gzip or no header
-    MIMEField *ae_field = s->hdr_info.client_request.field_find(MIME_FIELD_ACCEPT_ENCODING, MIME_LEN_ACCEPT_ENCODING);
-
-    if (ae_field) {
-      if (HttpTransactCache::match_gzip(ae_field) == GZIP) {
-        s->hdr_info.client_request.field_value_set(ae_field, "gzip", 4);
-        DebugTxn("http_trans", "[ModifyRequest] normalized Accept-Encoding to gzip");
-      } else {
-        s->hdr_info.client_request.field_delete(ae_field);
-        DebugTxn("http_trans", "[ModifyRequest] removed non-gzip Accept-Encoding");
-      }
-    }
-  }
-
   DebugTxn("http_trans", "END HttpTransact::ModifyRequest");
   DebugTxn("http_trans", "Checking if transaction wants to upgrade");
 
@@ -2306,8 +2302,8 @@ HttpTransact::HandleCacheOpenReadHitFreshness(State *s)
   // for it. this is just to deal with the effects
   // of the skew by setting minimum and maximum times
   // so that ages are not negative, etc.
-  s->request_sent_time      = min(s->client_request_time, s->request_sent_time);
-  s->response_received_time = min(s->client_request_time, s->response_received_time);
+  s->request_sent_time      = std::min(s->client_request_time, s->request_sent_time);
+  s->response_received_time = std::min(s->client_request_time, s->response_received_time);
 
   ink_assert(s->request_sent_time <= s->response_received_time);
 
@@ -2343,8 +2339,10 @@ HttpTransact::HandleCacheOpenReadHitFreshness(State *s)
   }
 
   ink_assert(s->cache_lookup_result != HttpTransact::CACHE_LOOKUP_MISS);
-  if (s->cache_lookup_result == HttpTransact::CACHE_LOOKUP_HIT_STALE)
+  if (s->cache_lookup_result == HttpTransact::CACHE_LOOKUP_HIT_STALE) {
     SET_VIA_STRING(VIA_DETAIL_CACHE_LOOKUP, VIA_DETAIL_MISS_EXPIRED);
+    SET_VIA_STRING(VIA_CACHE_RESULT, VIA_IN_CACHE_STALE);
+  }
 
   if (!s->force_dns) { // If DNS is not performed before
     if (need_to_revalidate(s)) {
@@ -4559,7 +4557,7 @@ HttpTransact::merge_and_update_headers_for_cache_update(State *s)
     // If the cached response has an Age: we should update it
     // We could use calculate_document_age but my guess is it's overkill
     // Just use 'now' - 304's Date: + Age: (response's Age: if there)
-    date_value = max(s->current.now - date_value, (ink_time_t)0);
+    date_value = std::max(s->current.now - date_value, (ink_time_t)0);
     if (s->hdr_info.server_response.presence(MIME_PRESENCE_AGE)) {
       time_t new_age = s->hdr_info.server_response.get_age();
 
@@ -6992,7 +6990,7 @@ HttpTransact::calculate_document_freshness_limit(State *s, HTTPHdr *response, ti
       freshness_limit = (int)response->get_cooked_cc_max_age();
       DebugTxn("http_match", "calculate_document_freshness_limit --- max_age set, freshness_limit = %d", freshness_limit);
     }
-    freshness_limit = min(max(0, freshness_limit), (int)s->txn_conf->cache_guaranteed_max_lifetime);
+    freshness_limit = std::min(std::max(0, freshness_limit), (int)s->txn_conf->cache_guaranteed_max_lifetime);
   } else {
     date_set = last_modified_set = false;
 
@@ -7029,7 +7027,7 @@ HttpTransact::calculate_document_freshness_limit(State *s, HTTPHdr *response, ti
       DebugTxn("http_match", "calculate_document_freshness_limit --- Expires: %" PRId64 ", Date: %" PRId64 ", freshness_limit = %d",
                (int64_t)expires_value, (int64_t)date_value, freshness_limit);
 
-      freshness_limit = min(max(0, freshness_limit), (int)s->txn_conf->cache_guaranteed_max_lifetime);
+      freshness_limit = std::min(std::max(0, freshness_limit), (int)s->txn_conf->cache_guaranteed_max_lifetime);
     } else {
       last_modified_value = 0;
       if (response->presence(MIME_PRESENCE_LAST_MODIFIED)) {
@@ -7053,7 +7051,7 @@ HttpTransact::calculate_document_freshness_limit(State *s, HTTPHdr *response, ti
         ink_assert((f >= 0.0) && (f <= 1.0));
         ink_time_t time_since_last_modify = date_value - last_modified_value;
         int h_freshness                   = (int)(time_since_last_modify * f);
-        freshness_limit                   = max(h_freshness, 0);
+        freshness_limit                   = std::max(h_freshness, 0);
         DebugTxn("http_match", "calculate_document_freshness_limit --- heuristic: date=%" PRId64 ", lm=%" PRId64
                                ", time_since_last_modify=%" PRId64 ", f=%g, freshness_limit = %d",
                  (int64_t)date_value, (int64_t)last_modified_value, (int64_t)time_since_last_modify, f, freshness_limit);
@@ -7065,13 +7063,13 @@ HttpTransact::calculate_document_freshness_limit(State *s, HTTPHdr *response, ti
   }
 
   // The freshness limit must always fall within the min and max guaranteed bounds.
-  min_freshness_bounds = max((MgmtInt)0, s->txn_conf->cache_guaranteed_min_lifetime);
+  min_freshness_bounds = std::max((MgmtInt)0, s->txn_conf->cache_guaranteed_min_lifetime);
   max_freshness_bounds = s->txn_conf->cache_guaranteed_max_lifetime;
 
   // Heuristic freshness can be more strict.
   if (*heuristic) {
-    min_freshness_bounds = max(min_freshness_bounds, s->txn_conf->cache_heuristic_min_lifetime);
-    max_freshness_bounds = min(max_freshness_bounds, s->txn_conf->cache_heuristic_max_lifetime);
+    min_freshness_bounds = std::max(min_freshness_bounds, s->txn_conf->cache_heuristic_min_lifetime);
+    max_freshness_bounds = std::min(max_freshness_bounds, s->txn_conf->cache_heuristic_max_lifetime);
   }
   // Now clip the freshness limit.
   if (freshness_limit > max_freshness_bounds) {
@@ -7158,7 +7156,7 @@ HttpTransact::what_is_document_freshness(State *s, HTTPHdr *client_request, HTTP
   if (current_age < 0) {
     current_age = s->txn_conf->cache_guaranteed_max_lifetime;
   } else {
-    current_age = min((time_t)s->txn_conf->cache_guaranteed_max_lifetime, current_age);
+    current_age = std::min((time_t)s->txn_conf->cache_guaranteed_max_lifetime, current_age);
   }
 
   DebugTxn("http_match", "[what_is_document_freshness] fresh_limit:  %d  current_age: %" PRId64, fresh_limit, (int64_t)current_age);
@@ -7235,7 +7233,7 @@ HttpTransact::what_is_document_freshness(State *s, HTTPHdr *client_request, HTTP
     // if min-fresh set, constrain the freshness limit //
     /////////////////////////////////////////////////////
     if (cooked_cc_mask & MIME_COOKED_MASK_CC_MIN_FRESH) {
-      age_limit = min(age_limit, fresh_limit - client_request->get_cooked_cc_min_fresh());
+      age_limit = std::min(age_limit, fresh_limit - client_request->get_cooked_cc_min_fresh());
       DebugTxn("http_match", "[..._document_freshness] min_fresh set, age limit: %d", age_limit);
     }
     ///////////////////////////////////////////////////
@@ -7246,7 +7244,7 @@ HttpTransact::what_is_document_freshness(State *s, HTTPHdr *client_request, HTTP
       if (age_val == 0) {
         do_revalidate = true;
       }
-      age_limit = min(age_limit, age_val);
+      age_limit = std::min(age_limit, age_val);
       DebugTxn("http_match", "[..._document_freshness] min_fresh set, age limit: %d", age_limit);
     }
   }
@@ -7538,6 +7536,7 @@ HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_r
 
   HttpTransactHeaders::copy_header_fields(base_request, outgoing_request, s->txn_conf->fwd_proxy_auth_to_parent);
   add_client_ip_to_outgoing_request(s, outgoing_request);
+  HttpTransactHeaders::add_forwarded_field_to_request(s, outgoing_request);
   HttpTransactHeaders::remove_privacy_headers_from_request(s->http_config_param, s->txn_conf, outgoing_request);
   HttpTransactHeaders::add_global_user_agent_header_to_request(s->txn_conf, outgoing_request);
   handle_request_keep_alive_headers(s, outgoing_version, outgoing_request);
@@ -7633,6 +7632,10 @@ HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_r
     DebugTxn("http_trans", "[build_request] request expect 100-continue headers removed");
   }
 
+  // Perform any configured normalization (including per-remap-rule configuration overrides) of the Accept-Encoding header
+  // field (if any).
+  HttpTransactHeaders::normalize_accept_encoding(s->txn_conf, outgoing_request);
+
   s->request_sent_time = ink_local_time();
   s->current.now       = s->request_sent_time;
   // The assert is backwards in this case because request is being (re)sent.
@@ -8487,7 +8490,7 @@ HttpTransact::update_size_and_time_stats(State *s, ink_hrtime total_time, ink_hr
   switch (s->state_machine->background_fill) {
   case BACKGROUND_FILL_COMPLETED: {
     int64_t bg_size = origin_server_response_body_size - user_agent_response_body_size;
-    bg_size         = max((int64_t)0, bg_size);
+    bg_size         = std::max((int64_t)0, bg_size);
     HTTP_SUM_DYN_STAT(http_background_fill_bytes_completed_stat, bg_size);
     break;
   }
diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h
index 5e2d3b3..27e7abc 100644
--- a/proxy/http/HttpTransact.h
+++ b/proxy/http/HttpTransact.h
@@ -821,7 +821,7 @@ public:
     CacheAuth_t www_auth_content = CACHE_AUTH_NONE;
 
     // INK API/Remap API plugin interface
-    void *remap_plugin_instance = 0;
+    void *remap_plugin_instance = nullptr;
     void *user_args[HTTP_SSN_TXN_MAX_USER_ARG];
     remap_plugin_info::_tsremap_os_response *fp_tsremap_os_response = nullptr;
     HTTPStatus http_return_code                                     = HTTP_STATUS_NONE;
diff --git a/proxy/http/HttpTransactCache.cc b/proxy/http/HttpTransactCache.cc
index ed8fd85..70f2d76 100644
--- a/proxy/http/HttpTransactCache.cc
+++ b/proxy/http/HttpTransactCache.cc
@@ -749,14 +749,14 @@ does_encoding_match(char *enc1, const char *enc2)
   return false;
 }
 
-ContentEncoding
-HttpTransactCache::match_gzip(MIMEField *accept_field)
+bool
+HttpTransactCache::match_content_encoding(MIMEField *accept_field, const char *encoding_identifier)
 {
   Str *a_value;
   const char *a_raw;
   StrList a_values_list;
   if (!accept_field) {
-    return NO_GZIP;
+    return false;
   }
   // TODO: Should we check the return value (count) here?
   accept_field->value_get_comma_list(&a_values_list);
@@ -773,11 +773,11 @@ HttpTransactCache::match_gzip(MIMEField *accept_field)
     }
     float q;
     q = HttpCompat::find_Q_param_in_strlist(&a_param_list);
-    if (q != 0 && does_encoding_match(a_encoding, "gzip")) {
-      return GZIP;
+    if (q != 0 && does_encoding_match(a_encoding, encoding_identifier)) {
+      return true;
     }
   }
-  return NO_GZIP;
+  return false;
 }
 
 // TODO: This used to take a length for c_raw, but that was never used, so removed it from the prototype.
@@ -910,8 +910,8 @@ HttpTransactCache::calculate_quality_of_accept_encoding_match(MIMEField *accept_
   if (!content_field) {
     if (!match_accept_content_encoding("identity", accept_field, &wildcard_present, &wildcard_q, &q)) {
       // CE was not returned, and AE does not have identity
-      if (match_gzip(accept_field) == GZIP && match_gzip(cached_accept_field) == GZIP) {
-        return (float)1.0;
+      if (match_content_encoding(accept_field, "gzip") and match_content_encoding(cached_accept_field, "gzip")) {
+        return 1.0f;
       }
       goto encoding_wildcard;
     }
@@ -949,17 +949,17 @@ encoding_wildcard:
   // still okay, but otherwise, this is just not a match at all.         //
   /////////////////////////////////////////////////////////////////////////
   if ((q == -1.0) && is_identity_encoding) {
-    if (match_gzip(accept_field) == GZIP) {
-      if (match_gzip(cached_accept_field) == GZIP) {
-        return (float)1.0;
+    if (match_content_encoding(accept_field, "gzip")) {
+      if (match_content_encoding(cached_accept_field, "gzip")) {
+        return 1.0f;
       } else {
         // always try to fetch GZIP content if we have not tried sending AE before
-        return (float)-1.0;
+        return -1.0f;
       }
-    } else if (cached_accept_field && match_gzip(cached_accept_field) != GZIP) {
-      return (float)0.001;
+    } else if (cached_accept_field && !match_content_encoding(cached_accept_field, "gzip")) {
+      return 0.001f;
     } else {
-      return (float)-1.0;
+      return -1.0f;
     }
   }
   //      q = (float)-1.0;
diff --git a/proxy/http/HttpTransactCache.h b/proxy/http/HttpTransactCache.h
index ff6bdd9..7b6b9c9 100644
--- a/proxy/http/HttpTransactCache.h
+++ b/proxy/http/HttpTransactCache.h
@@ -45,11 +45,6 @@ enum Variability_t {
   VARIABILITY_ALL,
 };
 
-enum ContentEncoding {
-  NO_GZIP = 0,
-  GZIP,
-};
-
 class HttpTransactCache
 {
 public:
@@ -70,7 +65,9 @@ public:
 
   static float calculate_quality_of_accept_encoding_match(MIMEField *accept_field, MIMEField *content_field,
                                                           MIMEField *cached_accept_field = nullptr);
-  static ContentEncoding match_gzip(MIMEField *accept_field);
+
+  // 'encoding_identifier' is a nul-terminated string.
+  static bool match_content_encoding(MIMEField *accept_field, const char *encoding_identifier);
 
   static float calculate_quality_of_accept_language_match(MIMEField *accept_field, MIMEField *content_field,
                                                           MIMEField *cached_accept_field = nullptr);
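
Usage sketch for the generalized matcher, mirroring the old match_gzip() call sites (request is assumed to be an HTTPHdr):

    MIMEField *ae = request.field_find(MIME_FIELD_ACCEPT_ENCODING, MIME_LEN_ACCEPT_ENCODING);
    if (HttpTransactCache::match_content_encoding(ae, "br")) {
      // the client advertises brotli with a non-zero q-value
    }
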
diff --git a/proxy/http/HttpTransactHeaders.cc b/proxy/http/HttpTransactHeaders.cc
index ee2bb38..358dd45 100644
--- a/proxy/http/HttpTransactHeaders.cc
+++ b/proxy/http/HttpTransactHeaders.cc
@@ -20,7 +20,12 @@
   See the License for the specific language governing permissions and
   limitations under the License.
  */
-#include "ts/ink_platform.h"
+
+#include <bitset>
+#include <algorithm>
+
+#include <ts/ink_platform.h>
+#include <ts/BufferWriter.h>
 
 #include "HttpTransact.h"
 #include "HttpTransactHeaders.h"
@@ -409,7 +414,7 @@ HttpTransactHeaders::calculate_document_age(ink_time_t request_time, ink_time_t
   // Deal with clock skew. Sigh.
   //
   // TODO solve this global clock problem
-  now_value = max(now, response_time);
+  now_value = std::max(now, response_time);
 
   ink_assert(response_time >= 0);
   ink_assert(request_time >= 0);
@@ -417,12 +422,12 @@ HttpTransactHeaders::calculate_document_age(ink_time_t request_time, ink_time_t
   ink_assert(now_value >= response_time);
 
   if (date_value > 0) {
-    apparent_age = max((time_t)0, (response_time - date_value));
+    apparent_age = std::max((time_t)0, (response_time - date_value));
   }
   if (age_value < 0) {
     current_age = -1; // Overflow from Age: header
   } else {
-    corrected_received_age = max(apparent_age, age_value);
+    corrected_received_age = std::max(apparent_age, age_value);
     response_delay         = response_time - request_time;
     corrected_initial_age  = corrected_received_age + response_delay;
     resident_time          = now_value - response_time;
@@ -591,7 +596,7 @@ HttpTransactHeaders::generate_and_set_squid_codes(HTTPHdr *header, char *via_str
     log_code = SQUID_LOG_ERR_PROXY_DENIED;
     break;
   case VIA_ERROR_CONNECTION:
-    if (log_code == SQUID_LOG_TCP_MISS) {
+    if (log_code == SQUID_LOG_TCP_MISS || log_code == SQUID_LOG_TCP_REFRESH_MISS) {
       log_code = SQUID_LOG_ERR_CONNECT_FAIL;
     }
     break;
@@ -686,50 +691,56 @@ HttpTransactHeaders::insert_server_header_in_response(const char *server_tag, in
 
 /// write the protocol stack to the @a via_string.
 /// If @a detailed then do the full stack, otherwise just the "top level" protocol.
-size_t
-write_via_protocol_stack(char *via_string, size_t len, bool detailed, ts::StringView *proto_buf, int n_proto)
+/// Returns the number of characters appended to hdr_string (no nul appended).
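+/// Illustrative only: for a TLS HTTP/1.1 client, Compact might yield "https", Standard "https/1.1", and Full
+/// the complete tag list joined by @a separator (e.g. "http/1.1 tls/1.2 tcp ipv4").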
+int
+HttpTransactHeaders::write_hdr_protocol_stack(char *hdr_string, size_t len, ProtocolStackDetail pSDetail, ts::StringView *proto_buf,
+                                              int n_proto, char separator)
 {
-  char *via   = via_string; // keep original pointer for size computation later.
-  char *limit = via_string + len;
+  char *hdr   = hdr_string; // keep original pointer for size computation later.
+  char *limit = hdr_string + len;
   static constexpr ts::StringView tls_prefix{"tls/", ts::StringView::literal};
 
-  if (n_proto <= 0 || via == nullptr || len <= 0) {
+  if (n_proto <= 0 || hdr == nullptr || len <= 0) {
     // nothing
-  } else if (detailed) {
-    for (ts::StringView *v = proto_buf, *v_limit = proto_buf + n_proto; v < v_limit && (via + v->size() + 1) < limit; ++v) {
+  } else if (ProtocolStackDetail::Full == pSDetail) {
+    for (ts::StringView *v = proto_buf, *v_limit = proto_buf + n_proto; v < v_limit && (hdr + v->size() + 1) < limit; ++v) {
       if (v != proto_buf) {
-        *via++ = ' ';
+        *hdr++ = separator;
       }
-      memcpy(via, v->ptr(), v->size());
-      via += v->size();
+      memcpy(hdr, v->ptr(), v->size());
+      hdr += v->size();
     }
   } else {
     ts::StringView *proto_end = proto_buf + n_proto;
     bool http_1_0_p           = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_1_0) != proto_end;
     bool http_1_1_p           = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_1_1) != proto_end;
 
-    if ((http_1_0_p || http_1_1_p) && via + 10 < limit) {
+    if ((http_1_0_p || http_1_1_p) && hdr + 10 < limit) {
       bool tls_p = std::find_if(proto_buf, proto_end, [](ts::StringView tag) { return tls_prefix.isPrefixOf(tag); }) != proto_end;
-      bool http_2_p = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_2_0) != proto_end;
 
-      memcpy(via, "http", 4);
-      via += 4;
+      memcpy(hdr, "http", 4);
+      hdr += 4;
       if (tls_p)
-        *via++ = 's';
-      *via++   = '/';
-      if (http_2_p) {
-        *via++ = '2';
-      } else if (http_1_0_p) {
-        memcpy(via, "1.0", 3);
-        via += 3;
-      } else if (http_1_1_p) {
-        memcpy(via, "1.1", 3);
-        via += 3;
+        *hdr++ = 's';
+
+      // If the detail level is Compact (the RFC 7239 compliant "proto" value for the Forwarded field), stop here.
+
+      if (ProtocolStackDetail::Standard == pSDetail) {
+        *hdr++        = '/';
+        bool http_2_p = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_2_0) != proto_end;
+        if (http_2_p) {
+          *hdr++ = '2';
+        } else if (http_1_0_p) {
+          memcpy(hdr, "1.0", 3);
+          hdr += 3;
+        } else if (http_1_1_p) {
+          memcpy(hdr, "1.1", 3);
+          hdr += 3;
+        }
       }
-      *via++ = ' ';
     }
   }
-  return via - via_string;
+  return hdr - hdr_string;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -792,7 +803,10 @@ HttpTransactHeaders::insert_via_header_in_request(HttpTransact::State *s, HTTPHd
   std::array<ts::StringView, 10> proto_buf; // 10 seems like a reasonable number of protos to print
   int n_proto = s->state_machine->populate_client_protocol(proto_buf.data(), proto_buf.size());
 
-  via_string += write_via_protocol_stack(via_string, via_limit - via_string, false, proto_buf.data(), n_proto);
+  via_string +=
+    write_hdr_protocol_stack(via_string, via_limit - via_string, ProtocolStackDetail::Standard, proto_buf.data(), n_proto);
+  *via_string++ = ' ';
+
   via_string += nstrcpy(via_string, s->http_config_param->proxy_hostname);
 
   *via_string++ = '[';
@@ -822,7 +836,8 @@ HttpTransactHeaders::insert_via_header_in_request(HttpTransact::State *s, HTTPHd
     if (via_limit - via_string > 4 && s->txn_conf->insert_request_via_string > 3) { // Ultra highest verbosity
       *via_string++ = ' ';
       *via_string++ = '[';
-      via_string += write_via_protocol_stack(via_string, via_limit - via_string - 3, true, proto_buf.data(), n_proto);
+      via_string +=
+        write_hdr_protocol_stack(via_string, via_limit - via_string - 3, ProtocolStackDetail::Full, proto_buf.data(), n_proto);
       *via_string++ = ']';
     }
   }
@@ -877,7 +892,9 @@ HttpTransactHeaders::insert_via_header_in_response(HttpTransact::State *s, HTTPH
   if (ss) {
     n_proto += ss->populate_protocol(proto_buf.data() + n_proto, proto_buf.size() - n_proto);
   }
-  via_string += write_via_protocol_stack(via_string, via_limit - via_string, false, proto_buf.data(), n_proto);
+  via_string +=
+    write_hdr_protocol_stack(via_string, via_limit - via_string, ProtocolStackDetail::Standard, proto_buf.data(), n_proto);
+  *via_string++ = ' ';
 
   via_string += nstrcpy(via_string, s->http_config_param->proxy_hostname);
   *via_string++ = ' ';
@@ -902,7 +919,8 @@ HttpTransactHeaders::insert_via_header_in_response(HttpTransact::State *s, HTTPH
     if (via_limit - via_string > 4 && s->txn_conf->insert_response_via_string > 3) { // Ultra highest verbosity
       *via_string++ = ' ';
       *via_string++ = '[';
-      via_string += write_via_protocol_stack(via_string, via_limit - via_string - 3, true, proto_buf.data(), n_proto);
+      via_string +=
+        write_hdr_protocol_stack(via_string, via_limit - via_string - 3, ProtocolStackDetail::Full, proto_buf.data(), n_proto);
       *via_string++ = ']';
     }
   }
@@ -996,6 +1014,191 @@ HttpTransactHeaders::add_global_user_agent_header_to_request(OverridableHttpConf
 }
 
 void
+HttpTransactHeaders::add_forwarded_field_to_request(HttpTransact::State *s, HTTPHdr *request)
+{
+  HttpForwarded::OptionBitSet optSet = s->txn_conf->insert_forwarded;
+
+  if (optSet.any()) { // One or more Forwarded parameters enabled, so insert/append to Forwarded header.
+
+    ts::LocalBufferWriter<1024> hdr;
+
+    if (optSet[HttpForwarded::FOR] and ats_is_ip(&s->client_info.src_addr.sa)) {
+      // NOTE:  The logic within this if statement assumes that hdr is empty at this point.
+
+      hdr << "for=";
+
+      bool is_ipv6 = ats_is_ip6(&s->client_info.src_addr.sa);
+
+      if (is_ipv6) {
+        hdr << "\"[";
+      }
+
+      if (ats_ip_ntop(&s->client_info.src_addr.sa, hdr.auxBuffer(), hdr.remaining()) == nullptr) {
+        Debug("http_trans", "[add_forwarded_field_to_outgoing_request] ats_ip_ntop() call failed");
+        return;
+      }
+
+      // Fail-safe.
+      hdr.auxBuffer()[hdr.remaining() - 1] = '\0';
+
+      hdr.write(strlen(hdr.auxBuffer()));
+
+      if (is_ipv6) {
+        hdr << "]\"";
+      }
+    }
+
+    if (optSet[HttpForwarded::BY_UNKNOWN]) {
+      if (hdr.size()) {
+        hdr << ';';
+      }
+
+      hdr << "by=unknown";
+    }
+
+    if (optSet[HttpForwarded::BY_SERVER_NAME]) {
+      if (hdr.size()) {
+        hdr << ';';
+      }
+
+      hdr << "by=" << s->http_config_param->proxy_hostname;
+    }
+
+    const Machine &m = *Machine::instance();
+
+    if (optSet[HttpForwarded::BY_UUID] and m.uuid.valid()) {
+      if (hdr.size()) {
+        hdr << ';';
+      }
+
+      hdr << "by=_" << m.uuid.getString();
+    }
+
+    if (optSet[HttpForwarded::BY_IP] and (m.ip_string_len > 0)) {
+      if (hdr.size()) {
+        hdr << ';';
+      }
+
+      hdr << "by=";
+
+      bool is_ipv6 = ats_is_ip6(&s->client_info.dst_addr.sa);
+
+      if (is_ipv6) {
+        hdr << "\"[";
+      }
+
+      if (ats_ip_ntop(&s->client_info.dst_addr.sa, hdr.auxBuffer(), hdr.remaining()) == nullptr) {
+        Debug("http_trans", "[add_forwarded_field_to_outgoing_request] ats_ip_ntop() call failed");
+        return;
+      }
+
+      // Fail-safe.
+      hdr.auxBuffer()[hdr.remaining() - 1] = '\0';
+
+      hdr.write(strlen(hdr.auxBuffer()));
+
+      if (is_ipv6) {
+        hdr << "]\"";
+      }
+    }
+
+    std::array<ts::StringView, 10> protoBuf; // 10 seems like a reasonable number of protos to print
+    int nProto = 0;                          // Indulge clang's incorrect claim that this needs to be initialized.
+
+    static const HttpForwarded::OptionBitSet OptionsNeedingProtocol = HttpForwarded::OptionBitSet()
+                                                                        .set(HttpForwarded::PROTO)
+                                                                        .set(HttpForwarded::CONNECTION_COMPACT)
+                                                                        .set(HttpForwarded::CONNECTION_STD)
+                                                                        .set(HttpForwarded::CONNECTION_FULL);
+
+    if ((optSet bitand OptionsNeedingProtocol).any()) {
+      nProto = s->state_machine->populate_client_protocol(protoBuf.data(), protoBuf.size());
+    }
+
+    if (optSet[HttpForwarded::PROTO] and (nProto > 0)) {
+      if (hdr.size()) {
+        hdr << ';';
+      }
+
+      hdr << "proto=";
+
+      int numChars = HttpTransactHeaders::write_hdr_protocol_stack(
+        hdr.auxBuffer(), hdr.remaining(), HttpTransactHeaders::ProtocolStackDetail::Compact, protoBuf.data(), nProto, '-');
+      if (numChars > 0) {
+        hdr.write(size_t(numChars));
+      }
+    }
+
+    if (optSet[HttpForwarded::HOST]) {
+      const MIMEField *hostField = s->hdr_info.client_request.field_find(MIME_FIELD_HOST, MIME_LEN_HOST);
+
+      if (hostField and hostField->m_len_value) {
+        ts::string_view hSV{hostField->m_ptr_value, hostField->m_len_value};
+
+        bool needsDoubleQuotes = hSV.find(':') != ts::string_view::npos;
+
+        if (hdr.size()) {
+          hdr << ';';
+        }
+
+        hdr << "host=";
+        if (needsDoubleQuotes) {
+          hdr << '"';
+        }
+        hdr << hSV;
+        if (needsDoubleQuotes) {
+          hdr << '"';
+        }
+      }
+    }
+
+    if (nProto > 0) {
+      auto Conn = [&](HttpForwarded::Option opt, HttpTransactHeaders::ProtocolStackDetail detail) -> void {
+        if (optSet[opt]) {
+          int revert = hdr.size();
+
+          if (hdr.size()) {
+            hdr << ';';
+          }
+
+          hdr << "connection=";
+
+          int numChars =
+            HttpTransactHeaders::write_hdr_protocol_stack(hdr.auxBuffer(), hdr.remaining(), detail, protoBuf.data(), nProto, '-');
+          if (numChars > 0) {
+            hdr.write(size_t(numChars));
+          }
+
+          if ((numChars <= 0) or (hdr.size() >= hdr.capacity())) {
+            // Remove parameter with potentially incomplete value.
+            //
+            hdr.reduce(revert);
+          }
+        }
+      };
+
+      Conn(HttpForwarded::CONNECTION_COMPACT, HttpTransactHeaders::ProtocolStackDetail::Compact);
+      Conn(HttpForwarded::CONNECTION_STD, HttpTransactHeaders::ProtocolStackDetail::Standard);
+      Conn(HttpForwarded::CONNECTION_FULL, HttpTransactHeaders::ProtocolStackDetail::Full);
+    }
+
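+    // Illustrative only (hypothetical names and addresses): with "for", "by=servername", "proto" and "host"
+    // enabled, the value built here for an IPv6 client could look like:
+    //   for="[2001:db8::1]";by=proxy.example.com;proto=https;host="origin.example.com:8443"
+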
+    // Add or append to the Forwarded header.  As a fail-safe against corrupting the MIME header, don't add Forwarded if
+    // its size is exactly the capacity of the buffer.
+    //
+    if (hdr.size() and !hdr.error() and (hdr.size() < hdr.capacity())) {
+      ts::string_view sV = hdr.view();
+
+      request->value_append(MIME_FIELD_FORWARDED, MIME_LEN_FORWARDED, sV.data(), sV.size(), true, ','); // true => separator must
+                                                                                                        // be inserted
+
+      Debug("http_trans", "[add_forwarded_field_to_outgoing_request] Forwarded header (%.*s) added", static_cast<int>(hdr.size()),
+            hdr.data());
+    }
+  }
+
+} // end HttpTransactHeaders::add_forwarded_field_to_request()
+
+void
 HttpTransactHeaders::add_server_header_to_response(OverridableHttpConfigParams *http_txn_conf, HTTPHdr *header)
 {
   if (http_txn_conf->proxy_response_server_enabled && http_txn_conf->proxy_response_server_string) {
@@ -1073,3 +1276,45 @@ HttpTransactHeaders::remove_privacy_headers_from_request(HttpConfigParams *http_
     }
   }
 }
+
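+// Normalize the client Accept-Encoding header according to proxy.config.http.normalize_ae:
+//   1 -> reduce it to "gzip", or remove it when gzip is not acceptable;
+//   2 -> reduce it to "br" (Brotli), falling back to "gzip", otherwise remove it.
+// Illustrative only: with normalize_ae == 2, "Accept-Encoding: br;q=0.9, gzip, deflate" becomes "Accept-Encoding: br".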
+void
+HttpTransactHeaders::normalize_accept_encoding(const OverridableHttpConfigParams *ohcp, HTTPHdr *header)
+{
+  int normalize_ae = ohcp->normalize_ae;
+
+  if (normalize_ae) {
+    MIMEField *ae_field = header->field_find(MIME_FIELD_ACCEPT_ENCODING, MIME_LEN_ACCEPT_ENCODING);
+
+    if (ae_field) {
+      if (normalize_ae == 1) {
+        // Force Accept-Encoding header to gzip or no header.
+        if (HttpTransactCache::match_content_encoding(ae_field, "gzip")) {
+          header->field_value_set(ae_field, "gzip", 4);
+          Debug("http_trans", "[Headers::normalize_accept_encoding] normalized Accept-Encoding to gzip");
+        } else {
+          header->field_delete(ae_field);
+          Debug("http_trans", "[Headers::normalize_accept_encoding] removed non-gzip Accept-Encoding");
+        }
+      } else if (normalize_ae == 2) {
+        // Force Accept-Encoding header to br (Brotli) or no header.
+        if (HttpTransactCache::match_content_encoding(ae_field, "br")) {
+          header->field_value_set(ae_field, "br", 2);
+          Debug("http_trans", "[Headers::normalize_accept_encoding] normalized Accept-Encoding to br");
+        } else if (HttpTransactCache::match_content_encoding(ae_field, "gzip")) {
+          header->field_value_set(ae_field, "gzip", 4);
+          Debug("http_trans", "[Headers::normalize_accept_encoding] normalized Accept-Encoding to gzip");
+        } else {
+          header->field_delete(ae_field);
+          Debug("http_trans", "[Headers::normalize_accept_encoding] removed non-br Accept-Encoding");
+        }
+      } else {
+        static bool logged = false;
+
+        if (!logged) {
+          Error("proxy.config.http.normalize_ae value out of range");
+          logged = true;
+        }
+      }
+    }
+  }
+}
diff --git a/proxy/http/HttpTransactHeaders.h b/proxy/http/HttpTransactHeaders.h
index c27a6a3..57d468f 100644
--- a/proxy/http/HttpTransactHeaders.h
+++ b/proxy/http/HttpTransactHeaders.h
@@ -62,6 +62,11 @@ public:
 
   static void generate_and_set_squid_codes(HTTPHdr *header, char *via_string, HttpTransact::SquidLogInfo *squid_codes);
 
+  enum class ProtocolStackDetail { Compact, Standard, Full };
+
+  static int write_hdr_protocol_stack(char *hdr_string, size_t len, ProtocolStackDetail pSDetail, ts::StringView *proto_buf,
+                                      int n_proto, char separator = ' ');
+
   // Removing handle_conditional_headers.  Functionality appears to be elsewhere (issue_revalidate)
   // and the only condition when it does anything causes an assert to go
   // off
@@ -75,8 +80,12 @@ public:
   static void insert_via_header_in_response(HttpTransact::State *s, HTTPHdr *header);
   static void insert_hsts_header_in_response(HttpTransact::State *s, HTTPHdr *header);
 
+  static void add_forwarded_field_to_request(HttpTransact::State *s, HTTPHdr *request);
+
   static bool is_request_proxy_authorized(HTTPHdr *incoming_hdr);
 
+  static void normalize_accept_encoding(const OverridableHttpConfigParams *ohcp, HTTPHdr *header);
+
   static void insert_basic_realm_in_proxy_authenticate(const char *realm, HTTPHdr *header, bool bRevPrxy);
 
   static void remove_conditional_headers(HTTPHdr *outgoing);
diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc
index 5eb9299..a5caec0 100644
--- a/proxy/http/HttpTunnel.cc
+++ b/proxy/http/HttpTunnel.cc
@@ -207,7 +207,7 @@ ChunkedHandler::transfer_bytes()
 
   // Handle the case where we are doing chunked passthrough.
   if (!dechunked_buffer) {
-    moved = MIN(bytes_left, chunked_reader->read_avail());
+    moved = std::min(bytes_left, chunked_reader->read_avail());
     chunked_reader->consume(moved);
     bytes_left = bytes_left - moved;
     return moved;
@@ -216,7 +216,7 @@ ChunkedHandler::transfer_bytes()
   while (bytes_left > 0) {
     block_read_avail = chunked_reader->block_read_avail();
 
-    to_move = MIN(bytes_left, block_read_avail);
+    to_move = std::min(bytes_left, block_read_avail);
     if (to_move <= 0) {
       break;
     }
@@ -348,7 +348,7 @@ ChunkedHandler::generate_chunked_content()
   }
 
   while ((r_avail = dechunked_reader->read_avail()) > 0 && state != CHUNK_WRITE_DONE) {
-    int64_t write_val = MIN(max_chunk_size, r_avail);
+    int64_t write_val = std::min(max_chunk_size, r_avail);
 
     state = CHUNK_WRITE_CHUNK;
     Debug("http_chunk", "creating a chunk of size %" PRId64 " bytes", write_val);
diff --git a/proxy/http/Makefile.am b/proxy/http/Makefile.am
index 0d9605a..4f68f96 100644
--- a/proxy/http/Makefile.am
+++ b/proxy/http/Makefile.am
@@ -73,13 +73,28 @@ libhttp_a_SOURCES = \
   HttpTunnel.cc \
   HttpTunnel.h \
   HttpUpdateSM.cc \
-  HttpUpdateSM.h
+  HttpUpdateSM.h \
+  ForwardedConfig.cc
 
 if BUILD_TESTS
   libhttp_a_SOURCES += HttpUpdateTester.cc \
     RegressionHttpTransact.cc
 endif
 
+check_PROGRAMS = \
+test_ForwardedConfig
+
+TESTS = $(check_PROGRAMS)
+
+test_ForwardedConfig_CPPFLAGS = $(AM_CPPFLAGS)\
+  -I$(abs_top_srcdir)/tests/include
+
+test_ForwardedConfig_SOURCES = \
+  unit-tests/test_ForwardedConfig.cc \
+  ForwardedConfig.cc \
+  unit-tests/test_ForwardedConfig_mocks.cc \
+  unit-tests/sym-links/MemView.cc
+
 tidy-local: $(libhttp_a_SOURCES) $(noinst_HEADERS)
 	$(CXX_Clang_Tidy)
 
diff --git a/proxy/http/remap/RemapConfig.cc b/proxy/http/remap/RemapConfig.cc
index b9e3ade..1f889df 100644
--- a/proxy/http/remap/RemapConfig.cc
+++ b/proxy/http/remap/RemapConfig.cc
@@ -337,7 +337,7 @@ parse_include_directive(const char *directive, BUILD_TABLE_INFO *bti, char *errb
           continue;
         }
 
-        subpath = Layout::relative_to(path, entrylist[j]->d_name);
+        subpath = Layout::relative_to(path.get(), entrylist[j]->d_name);
 
         if (ink_file_is_directory(subpath)) {
           continue;
diff --git a/proxy/http/remap/RemapProcessor.cc b/proxy/http/remap/RemapProcessor.cc
index ff622f0..767df83 100644
--- a/proxy/http/remap/RemapProcessor.cc
+++ b/proxy/http/remap/RemapProcessor.cc
@@ -227,7 +227,8 @@ RemapProcessor::finish_remap(HttpTransact::State *s)
       if (*redirect_url == nullptr) {
         *redirect_url = ats_strdup(map->filter_redirect_url ? map->filter_redirect_url : rewrite_table->http_default_redirect_url);
       }
-
+      if (HTTP_STATUS_NONE == s->http_return_code)
+        s->http_return_code = HTTP_STATUS_MOVED_TEMPORARILY;
       return false;
     }
   }
diff --git a/proxy/http/unit-tests/sym-links/MemView.cc b/proxy/http/unit-tests/sym-links/MemView.cc
new file mode 120000
index 0000000..51e80fb
--- /dev/null
+++ b/proxy/http/unit-tests/sym-links/MemView.cc
@@ -0,0 +1 @@
+../../../../lib/ts/MemView.cc
\ No newline at end of file
diff --git a/proxy/http/unit-tests/test_ForwardedConfig.cc b/proxy/http/unit-tests/test_ForwardedConfig.cc
new file mode 100644
index 0000000..712eb9b
--- /dev/null
+++ b/proxy/http/unit-tests/test_ForwardedConfig.cc
@@ -0,0 +1,169 @@
+/** @file
+
+  Catch-based tests for ForwardedConfig.cc.
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+
+#include <string>
+#include <cstring>
+#include <cctype>
+#include <bitset>
+#include <initializer_list>
+
+#define CATCH_CONFIG_MAIN
+#include "catch.hpp"
+
+#include "HttpConfig.h"
+
+using namespace HttpForwarded;
+
+class OptionBitSetListInit : public OptionBitSet
+{
+public:
+  OptionBitSetListInit(std::initializer_list<std::size_t> il)
+  {
+    for (std::size_t i : il) {
+      this->set(i);
+    }
+  }
+};
+
+namespace
+{
+const char *wsTbl[] = {"", " ", "  ", nullptr};
+
+int wsIdx{0};
+
+const char *
+nextWs()
+{
+  ++wsIdx;
+
+  if (!wsTbl[wsIdx]) {
+    wsIdx = 0;
+  }
+
+  return wsTbl[wsIdx];
+}
+// Alternate upper/lower case and add blanks.
+class XS
+{
+private:
+  std::string s;
+
+public:
+  XS(const char *in) : s{nextWs()}
+  {
+    bool upper{true};
+    for (; *in; ++in) {
+      if (islower(*in)) {
+        s += upper ? toupper(*in) : *in;
+        upper = !upper;
+
+      } else if (isupper(*in)) {
+        s += upper ? *in : tolower(*in);
+        upper = !upper;
+
+      } else {
+        s += *in;
+      }
+      s += nextWs();
+    }
+    s += nextWs();
+  }
+
+  operator ts::string_view() const { return ts::string_view(s.c_str()); }
+};
+
+void
+test(const char *spec, const char *reqErr, OptionBitSet bS)
+{
+  ts::LocalBufferWriter<1024> error;
+
+  error << "cheese";
+
+  REQUIRE(bS == optStrToBitset(XS(spec), error));
+  std::size_t len = std::strlen(reqErr);
+  REQUIRE((error.size() - sizeof("cheese") + 1) == len);
+  REQUIRE(std::memcmp(error.data() + sizeof("cheese") - 1, reqErr, len) == 0);
+}
+
+} // end anonymous namespace
+
+TEST_CASE("Forwarded", "[FWD]")
+{
+  test("none", "", OptionBitSet());
+
+  test("", "\"Forwarded\" configuration: \"   \" is a bad option.", OptionBitSet());
+
+  test("\t", "\"Forwarded\" configuration: \"\t   \" is a bad option.", OptionBitSet());
+
+  test(":", "\"Forwarded\" configuration: \"   \" is a bad option.", OptionBitSet());
+
+  test("|", "\"Forwarded\" configuration: \"   \" is a bad option.", OptionBitSet());
+
+  test("by=ip", "", OptionBitSetListInit{BY_IP});
+
+  test("by=unknown", "", OptionBitSetListInit{BY_UNKNOWN});
+
+  test("by=servername", "", OptionBitSetListInit{BY_SERVER_NAME});
+
+  test("by=uuid", "", OptionBitSetListInit{BY_UUID});
+
+  test("for", "", OptionBitSetListInit{FOR});
+
+  test("proto", "", OptionBitSetListInit{PROTO});
+
+  test("host", "", OptionBitSetListInit{HOST});
+
+  test("connection=compact", "", OptionBitSetListInit{CONNECTION_COMPACT});
+
+  test("connection=standard", "", OptionBitSetListInit{CONNECTION_STD});
+
+  test("connection=std", "", OptionBitSetListInit{CONNECTION_STD});
+
+  test("connection=full", "", OptionBitSetListInit{CONNECTION_FULL});
+
+  test("proto:by=uuid|for", "", OptionBitSetListInit{PROTO, BY_UUID, FOR});
+
+  test("proto:by=cheese|fur", "\"Forwarded\" configuration: \" b  Y= c  He E  sE \" and \"  fU r  \" are bad options.",
+       OptionBitSet());
+
+  test("proto:by=cheese|fur|compact=",
+       "\"Forwarded\" configuration: \" b  Y= c  He E  sE \", \"  fU r  \" and \"C o  Mp A  cT =  \" are bad options.",
+       OptionBitSet());
+
+#undef X
+#define X(S)                                                                                                                  \
+  "by=ip" S "by=unknown" S "by=servername" S "by=uuid" S "for" S "proto" S "host" S "connection=compact" S "connection=std" S \
+  "connection=full"
+
+  test(X(":"), "", OptionBitSet().set());
+
+  test(X("|"), "", OptionBitSet().set());
+
+  test(X("|") "|" X(":"), "", OptionBitSet().set());
+
+  test(X("|") ":abcd", "\"Forwarded\" configuration: \"  aB c  D \" is a bad option.", OptionBitSet());
+
+  test(X("|") ":for=abcd", "\"Forwarded\" configuration: \" f  Or =  Ab C  d \" is a bad option.", OptionBitSet());
+
+  test(X("|") ":by", "\"Forwarded\" configuration: \" b  Y \" is a bad option.", OptionBitSet());
+}
diff --git a/proxy/http/unit-tests/test_ForwardedConfig_mocks.cc b/proxy/http/unit-tests/test_ForwardedConfig_mocks.cc
new file mode 100644
index 0000000..a0fe062
--- /dev/null
+++ b/proxy/http/unit-tests/test_ForwardedConfig_mocks.cc
@@ -0,0 +1,86 @@
+/** @file
+
+  Mocks for unit test of ForwardedConfig.cc
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+
+#include <cstdlib>
+#include <iostream>
+
+#include <I_EventSystem.h>
+#include <I_Thread.h>
+
+void
+_ink_assert(const char *expression, const char *file, int line)
+{
+  std::cerr << "fatal error: ink_assert: file: " << file << " line: " << line << " expression: " << expression << std::endl;
+
+  std::exit(1);
+}
+
+namespace
+{
+void
+stub(const char *file, int line)
+{
+  std::cerr << "fatal error: call to link stub: file: " << file << " line: " << line << std::endl;
+
+  std::exit(1);
+}
+}
+
+#define STUB stub(__FILE__, __LINE__);
+
+inkcoreapi void
+ink_freelist_init(InkFreeList **fl, const char *name, uint32_t type_size, uint32_t chunk_size, uint32_t alignment)
+{
+}
+inkcoreapi void
+ink_freelist_free(InkFreeList *f, void *item){STUB} inkcoreapi
+  void ink_freelist_free_bulk(InkFreeList *f, void *head, void *tail, size_t num_item)
+{
+  STUB
+}
+void ink_mutex_destroy(pthread_mutex_t *){STUB} inkcoreapi ClassAllocator<ProxyMutex> mutexAllocator("ARGH");
+inkcoreapi ink_thread_key Thread::thread_data_key;
+volatile int res_track_memory;
+void ResourceTracker::increment(const char *, long){STUB} inkcoreapi Allocator ioBufAllocator[DEFAULT_BUFFER_SIZES];
+void
+ats_free(void *)
+{
+  STUB
+}
+int thread_freelist_high_watermark;
+int thread_freelist_low_watermark;
+inkcoreapi ClassAllocator<IOBufferBlock> ioBlockAllocator("ARGH");
+inkcoreapi ClassAllocator<IOBufferData> ioDataAllocator("ARGH");
+IOBufferBlock::IOBufferBlock()
+{
+}
+
+void
+IOBufferBlock::free()
+{
+}
+
+void
+IOBufferData::free()
+{
+}
diff --git a/proxy/http2/HTTP2.cc b/proxy/http2/HTTP2.cc
index 3295161..dd78520 100644
--- a/proxy/http2/HTTP2.cc
+++ b/proxy/http2/HTTP2.cc
@@ -588,7 +588,7 @@ http2_encode_header_blocks(HTTPHdr *in, uint8_t *out, uint32_t out_len, uint32_t
                            int32_t maximum_table_size)
 {
   // Limit the maximum table size to 64kB, which is the size advertised by major clients
-  maximum_table_size = min(maximum_table_size, HTTP2_MAX_TABLE_SIZE_LIMIT);
+  maximum_table_size = std::min(maximum_table_size, HTTP2_MAX_TABLE_SIZE_LIMIT);
   // Set maximum table size only if it is different from current maximum size
   if (maximum_table_size == hpack_get_maximum_table_size(handle)) {
     maximum_table_size = -1;
@@ -727,6 +727,7 @@ uint32_t Http2::max_request_header_size    = 131072;
 uint32_t Http2::accept_no_activity_timeout = 120;
 uint32_t Http2::no_activity_timeout_in     = 120;
 uint32_t Http2::active_timeout_in          = 0;
+uint32_t Http2::push_diary_size            = 256;
 
 void
 Http2::init()
@@ -743,6 +744,7 @@ Http2::init()
   REC_EstablishStaticConfigInt32U(accept_no_activity_timeout, "proxy.config.http2.accept_no_activity_timeout");
   REC_EstablishStaticConfigInt32U(no_activity_timeout_in, "proxy.config.http2.no_activity_timeout_in");
   REC_EstablishStaticConfigInt32U(active_timeout_in, "proxy.config.http2.active_timeout_in");
+  REC_EstablishStaticConfigInt32U(push_diary_size, "proxy.config.http2.push_diary_size");
 
   // If any settings is broken, ATS should not start
   ink_release_assert(http2_settings_parameter_is_valid({HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, max_concurrent_streams_in}));
diff --git a/proxy/http2/HTTP2.h b/proxy/http2/HTTP2.h
index d31c216..77bf174 100644
--- a/proxy/http2/HTTP2.h
+++ b/proxy/http2/HTTP2.h
@@ -378,6 +378,7 @@ public:
   static uint32_t accept_no_activity_timeout;
   static uint32_t no_activity_timeout_in;
   static uint32_t active_timeout_in;
+  static uint32_t push_diary_size;
 
   static void init();
 };
diff --git a/proxy/http2/Http2ClientSession.cc b/proxy/http2/Http2ClientSession.cc
index 97f5103..6acd49a 100644
--- a/proxy/http2/Http2ClientSession.cc
+++ b/proxy/http2/Http2ClientSession.cc
@@ -76,6 +76,10 @@ Http2ClientSession::destroy()
 void
 Http2ClientSession::free()
 {
+  if (h2_pushed_urls) {
+    this->h2_pushed_urls = ink_hash_table_destroy(this->h2_pushed_urls);
+  }
+
   if (client_vc) {
     release_netvc();
     client_vc->do_io_close();
@@ -180,6 +184,8 @@ Http2ClientSession::new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOB
   this->read_buffer             = iobuf ? iobuf : new_MIOBuffer(HTTP2_HEADER_BUFFER_SIZE_INDEX);
   this->read_buffer->water_mark = connection_state.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE);
   this->sm_reader               = reader ? reader : this->read_buffer->alloc_reader();
+  this->h2_pushed_urls          = ink_hash_table_create(InkHashTableKeyType_String);
+  this->h2_pushed_urls_size     = 0;
 
   this->write_buffer = new_MIOBuffer(HTTP2_HEADER_BUFFER_SIZE_INDEX);
   this->sm_writer    = this->write_buffer->alloc_reader();
diff --git a/proxy/http2/Http2ClientSession.h b/proxy/http2/Http2ClientSession.h
index 24c8ce0..d81fdbc 100644
--- a/proxy/http2/Http2ClientSession.h
+++ b/proxy/http2/Http2ClientSession.h
@@ -292,6 +292,26 @@ public:
     return half_close_local;
   }
 
+  bool
+  is_url_pushed(const char *url, int url_len)
+  {
+    char *dup_url            = ats_strndup(url, url_len);
+    InkHashTableEntry *entry = ink_hash_table_lookup_entry(h2_pushed_urls, dup_url);
+    ats_free(dup_url);
+    return entry != nullptr;
+  }
+
+  void
+  add_url_to_pushed_table(const char *url, int url_len)
+  {
+    if (h2_pushed_urls_size < Http2::push_diary_size) {
+      char *dup_url = ats_strndup(url, url_len);
+      ink_hash_table_insert(h2_pushed_urls, dup_url, nullptr);
+      h2_pushed_urls_size++;
+      ats_free(dup_url);
+    }
+  }
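+
+  // Note: the push diary is capped at proxy.config.http2.push_diary_size entries; URLs seen after the cap is
+  // reached are not recorded, so is_url_pushed() will keep returning false for them.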
+
   // noncopyable
   Http2ClientSession(Http2ClientSession &) = delete;
   Http2ClientSession &operator=(const Http2ClientSession &) = delete;
@@ -329,6 +349,9 @@ private:
   bool kill_me          = false;
   bool half_close_local = false;
   int recursion         = 0;
+
+  InkHashTable *h2_pushed_urls = nullptr;
+  uint32_t h2_pushed_urls_size = 0;
 };
 
 extern ClassAllocator<Http2ClientSession> http2ClientSessionAllocator;
diff --git a/proxy/http2/Http2ConnectionState.cc b/proxy/http2/Http2ConnectionState.cc
index c397d36..6cfe32f 100644
--- a/proxy/http2/Http2ConnectionState.cc
+++ b/proxy/http2/Http2ConnectionState.cc
@@ -167,7 +167,7 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame)
   myreader->writer()->dealloc_reader(myreader);
 
   uint32_t initial_rwnd = cstate.server_settings.get(HTTP2_SETTINGS_INITIAL_WINDOW_SIZE);
-  uint32_t min_rwnd     = min(initial_rwnd, cstate.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE));
+  uint32_t min_rwnd     = std::min(initial_rwnd, cstate.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE));
   // Connection level WINDOW UPDATE
   if (cstate.server_rwnd <= min_rwnd) {
     Http2WindowSize diff_size = initial_rwnd - cstate.server_rwnd;
@@ -279,12 +279,12 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame)
   }
 
   if (new_stream && Http2::stream_priority_enabled) {
-    DependencyTree::Node *node = cstate.dependency_tree->find(stream_id);
+    Http2DependencyTree::Node *node = cstate.dependency_tree->find(stream_id);
     if (node != nullptr) {
       stream->priority_node = node;
       node->t               = stream;
     } else {
-      DebugHttp2Stream(cstate.ua_session, stream_id, "PRIORITY - dep: %d, weight: %d, excl: %d, tree size: %d",
+      DebugHttp2Stream(cstate.ua_session, stream_id, "HEADER PRIORITY - dep: %d, weight: %d, excl: %d, tree size: %d",
                        params.priority.stream_dependency, params.priority.weight, params.priority.exclusive_flag,
                        cstate.dependency_tree->size());
 
@@ -398,7 +398,7 @@ rcv_priority_frame(Http2ConnectionState &cstate, const Http2Frame &frame)
   DebugHttp2Stream(cstate.ua_session, stream_id, "PRIORITY - dep: %d, weight: %d, excl: %d, tree size: %d",
                    priority.stream_dependency, priority.weight, priority.exclusive_flag, cstate.dependency_tree->size());
 
-  DependencyTree::Node *node = cstate.dependency_tree->find(stream_id);
+  Http2DependencyTree::Node *node = cstate.dependency_tree->find(stream_id);
 
   if (node != nullptr) {
     // [RFC 7540] 5.3.3 Reprioritization
@@ -713,7 +713,7 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame)
     }
 
     stream->client_rwnd += size;
-    ssize_t wnd = min(cstate.client_rwnd, stream->client_rwnd);
+    ssize_t wnd = std::min(cstate.client_rwnd, stream->client_rwnd);
 
     if (!stream->is_closed() && stream->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE && wnd > 0) {
       stream->send_response_body();
@@ -1072,7 +1072,7 @@ Http2ConnectionState::restart_streams()
   while (s) {
     Http2Stream *next = static_cast<Http2Stream *>(s->link.next);
     if (!s->is_closed() && s->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE &&
-        min(this->client_rwnd, s->client_rwnd) > 0) {
+        std::min(this->client_rwnd, s->client_rwnd) > 0) {
       s->send_response_body();
     }
     ink_assert(s != next);
@@ -1114,7 +1114,7 @@ Http2ConnectionState::delete_stream(Http2Stream *stream)
   DebugHttp2Stream(ua_session, stream->get_id(), "Delete stream");
 
   if (Http2::stream_priority_enabled) {
-    DependencyTree::Node *node = stream->priority_node;
+    Http2DependencyTree::Node *node = stream->priority_node;
     if (node != nullptr) {
       if (node->active) {
         dependency_tree->deactivate(node, 0);
@@ -1187,7 +1187,7 @@ Http2ConnectionState::schedule_stream(Http2Stream *stream)
 {
   DebugHttp2Stream(ua_session, stream->get_id(), "Scheduled");
 
-  DependencyTree::Node *node = stream->priority_node;
+  Http2DependencyTree::Node *node = stream->priority_node;
   ink_release_assert(node != nullptr);
 
   SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread());
@@ -1204,14 +1204,14 @@ Http2ConnectionState::schedule_stream(Http2Stream *stream)
 void
 Http2ConnectionState::send_data_frames_depends_on_priority()
 {
-  DependencyTree::Node *node = dependency_tree->top();
+  Http2DependencyTree::Node *node = dependency_tree->top();
 
   // No node to send or no connection level window left
   if (node == nullptr || client_rwnd <= 0) {
     return;
   }
 
-  Http2Stream *stream = node->t;
+  Http2Stream *stream = static_cast<Http2Stream *>(node->t);
   ink_release_assert(stream != nullptr);
   DebugHttp2Stream(ua_session, stream->get_id(), "top node, point=%d", node->point);
 
@@ -1246,9 +1246,9 @@ Http2ConnectionState::send_data_frames_depends_on_priority()
 Http2SendADataFrameResult
 Http2ConnectionState::send_a_data_frame(Http2Stream *stream, size_t &payload_length)
 {
-  const ssize_t window_size         = min(this->client_rwnd, stream->client_rwnd);
+  const ssize_t window_size         = std::min(this->client_rwnd, stream->client_rwnd);
   const size_t buf_len              = BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_DATA]);
-  const size_t write_available_size = min(buf_len, static_cast<size_t>(window_size));
+  const size_t write_available_size = std::min(buf_len, static_cast<size_t>(window_size));
   size_t read_available_size        = 0;
 
   uint8_t flags = 0x00;
@@ -1414,8 +1414,8 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
   flags = 0;
   while (sent < header_blocks_size) {
     DebugHttp2Stream(ua_session, stream->get_id(), "Send CONTINUATION frame");
-    payload_length = MIN(static_cast<uint32_t>(BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_CONTINUATION])),
-                         header_blocks_size - sent);
+    payload_length = std::min(static_cast<uint32_t>(BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_CONTINUATION])),
+                              static_cast<uint32_t>(header_blocks_size - sent));
     if (sent + payload_length == header_blocks_size) {
       flags |= HTTP2_FLAGS_CONTINUATION_END_HEADERS;
     }
@@ -1435,7 +1435,7 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
 }
 
 void
-Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url)
+Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url, const MIMEField *accept_encoding)
 {
   HTTPHdr h1_hdr, h2_hdr;
   uint8_t *buf                = nullptr;
@@ -1454,6 +1454,21 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url)
   h1_hdr.create(HTTP_TYPE_REQUEST);
   h1_hdr.url_set(&url);
   h1_hdr.method_set("GET", 3);
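+  // Preserve the client's Accept-Encoding in the synthesized push request so the promised response can be
+  // served with a content encoding the client accepts.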
+  if (accept_encoding != nullptr) {
+    MIMEField *f;
+    const char *name;
+    int name_len;
+    const char *value;
+    int value_len;
+
+    name  = accept_encoding->name_get(&name_len);
+    f     = h1_hdr.field_create(name, name_len);
+    value = accept_encoding->value_get(&value_len);
+    f->value_set(h1_hdr.m_heap, h1_hdr.m_mime, value, value_len);
+
+    h1_hdr.field_attach(f);
+  }
+
   http2_generate_h2_header_from_1_1(&h1_hdr, &h2_hdr);
 
   buf_len = h1_hdr.length_get() * 2; // Make it double just in case
@@ -1496,8 +1511,8 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url)
   flags = 0;
   while (sent < header_blocks_size) {
     DebugHttp2Stream(ua_session, stream->get_id(), "Send CONTINUATION frame");
-    payload_length = MIN(static_cast<uint32_t>(BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_CONTINUATION])),
-                         header_blocks_size - sent);
+    payload_length = std::min(static_cast<uint32_t>(BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_CONTINUATION])),
+                              static_cast<uint32_t>(header_blocks_size - sent));
     if (sent + payload_length == header_blocks_size) {
       flags |= HTTP2_FLAGS_CONTINUATION_END_HEADERS;
     }
@@ -1519,7 +1534,7 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url)
     return;
   }
   if (Http2::stream_priority_enabled) {
-    DependencyTree::Node *node = this->dependency_tree->find(id);
+    Http2DependencyTree::Node *node = this->dependency_tree->find(id);
     if (node != nullptr) {
       stream->priority_node = node;
     } else {
diff --git a/proxy/http2/Http2ConnectionState.h b/proxy/http2/Http2ConnectionState.h
index 94d1d2c..b0b46c5 100644
--- a/proxy/http2/Http2ConnectionState.h
+++ b/proxy/http2/Http2ConnectionState.h
@@ -220,7 +220,7 @@ public:
   void send_data_frames(Http2Stream *stream);
   Http2SendADataFrameResult send_a_data_frame(Http2Stream *stream, size_t &payload_length);
   void send_headers_frame(Http2Stream *stream);
-  void send_push_promise_frame(Http2Stream *stream, URL &url);
+  void send_push_promise_frame(Http2Stream *stream, URL &url, const MIMEField *accept_encoding);
   void send_rst_stream_frame(Http2StreamId id, Http2ErrorCode ec);
   void send_settings_frame(const Http2ConnectionSettings &new_settings);
   void send_ping_frame(Http2StreamId id, uint8_t flag, const uint8_t *opaque_data);
diff --git a/proxy/http2/Http2DependencyTree.h b/proxy/http2/Http2DependencyTree.h
index 007603b..d4b44ee 100644
--- a/proxy/http2/Http2DependencyTree.h
+++ b/proxy/http2/Http2DependencyTree.h
@@ -37,80 +37,80 @@
 const static uint32_t K                               = 256;
 const static uint32_t HTTP2_DEPENDENCY_TREE_MAX_DEPTH = 256;
 
-template <typename T> class Http2DependencyTree
+namespace Http2DependencyTree
+{
+class Node
 {
 public:
-  class Node
+  Node(void *t = nullptr) : t(t)
   {
-  public:
-    Node()
-      : active(false),
-        queued(false),
-        id(HTTP2_PRIORITY_DEFAULT_STREAM_DEPENDENCY),
-        weight(HTTP2_PRIORITY_DEFAULT_WEIGHT),
-        point(0),
-        parent(NULL),
-        t(NULL)
-    {
-      entry = new PriorityQueueEntry<Node *>(this);
-      queue = new PriorityQueue<Node *>();
-    }
-    Node(uint32_t i, uint32_t w, uint32_t p, Node *n, T t)
-      : active(false), queued(false), id(i), weight(w), point(p), parent(n), t(t)
-    {
-      entry = new PriorityQueueEntry<Node *>(this);
-      queue = new PriorityQueue<Node *>();
-    }
+    entry = new PriorityQueueEntry<Node *>(this);
+    queue = new PriorityQueue<Node *>();
+  }
+
+  Node(uint32_t i, uint32_t w, uint32_t p, Node *n, void *t = nullptr) : id(i), weight(w), point(p), t(t), parent(n)
+  {
+    entry = new PriorityQueueEntry<Node *>(this);
+    queue = new PriorityQueue<Node *>();
+  }
 
-    ~Node()
-    {
-      delete entry;
-      delete queue;
-
-      // delete all child nodes
-      if (!children.empty()) {
-        Node *node = children.head;
-        Node *next = NULL;
-        while (node) {
-          next = node->link.next;
-          children.remove(node);
-          delete node;
-          node = next;
-        }
+  ~Node()
+  {
+    delete entry;
+    delete queue;
+
+    // delete all child nodes
+    if (!children.empty()) {
+      Node *node = children.head;
+      Node *next = nullptr;
+      while (node) {
+        next = node->link.next;
+        children.remove(node);
+        delete node;
+        node = next;
       }
     }
+  }
 
-    LINK(Node, link);
+  LINK(Node, link);
 
-    bool
-    operator<(const Node &n) const
-    {
-      return point < n.point;
-    }
-    bool
-    operator>(const Node &n) const
-    {
-      return point > n.point;
-    }
+  bool
+  operator<(const Node &n) const
+  {
+    return point < n.point;
+  }
+  bool
+  operator>(const Node &n) const
+  {
+    return point > n.point;
+  }
 
-    bool active;
-    bool queued;
-    uint32_t id;
-    uint32_t weight;
-    uint32_t point;
-    Node *parent;
-    DLL<Node> children;
-    PriorityQueueEntry<Node *> *entry;
-    PriorityQueue<Node *> *queue;
-    T t;
-  };
-
-  Http2DependencyTree(uint32_t max_concurrent_streams)
-    : _root(new Node()), _max_depth(MIN(max_concurrent_streams, HTTP2_DEPENDENCY_TREE_MAX_DEPTH)), _node_count(0)
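+  // A "shadow" node is a placeholder for a stream that is so far only known as a dependency parent; it has
+  // no transaction attached (t == nullptr) until Tree::add() fills it in when the stream actually arrives.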
+  bool
+  is_shadow() const
   {
+    return t == nullptr;
   }
-  ~Http2DependencyTree() { delete _root; }
+
+  bool active     = false;
+  bool queued     = false;
+  uint32_t id     = HTTP2_PRIORITY_DEFAULT_STREAM_DEPENDENCY;
+  uint32_t weight = HTTP2_PRIORITY_DEFAULT_WEIGHT;
+  uint32_t point  = 0;
+  void *t         = nullptr;
+  Node *parent    = nullptr;
+  DLL<Node> children;
+  PriorityQueueEntry<Node *> *entry;
+  PriorityQueue<Node *> *queue;
+};
+
+template <typename T> class Tree
+{
+public:
+  Tree(uint32_t max_concurrent_streams) : _max_depth(MIN(max_concurrent_streams, HTTP2_DEPENDENCY_TREE_MAX_DEPTH)) {}
+
+  ~Tree() { delete _root; }
   Node *find(uint32_t id);
+  Node *find_shadow(uint32_t id);
   Node *add(uint32_t parent_id, uint32_t id, uint32_t weight, bool exclusive, T t);
   void remove(Node *node);
   void reprioritize(uint32_t new_parent_id, uint32_t id, bool exclusive);
@@ -126,27 +126,27 @@ private:
   Node *_top(Node *node);
   void _change_parent(Node *new_parent, Node *node, bool exclusive);
 
-  Node *_root;
+  Node *_root = new Node(this);
   uint32_t _max_depth;
-  uint32_t _node_count;
+  uint32_t _node_count = 0;
 };
 
 template <typename T>
-typename Http2DependencyTree<T>::Node *
-Http2DependencyTree<T>::_find(Node *node, uint32_t id, uint32_t depth)
+Node *
+Tree<T>::_find(Node *node, uint32_t id, uint32_t depth)
 {
   if (node->id == id) {
     return node;
   }
 
   if (node->children.empty() || depth >= _max_depth) {
-    return NULL;
+    return nullptr;
   }
 
-  Node *result = NULL;
+  Node *result = nullptr;
   for (Node *n = node->children.head; n; n = n->link.next) {
     result = _find(n, id, ++depth);
-    if (result != NULL) {
+    if (result != nullptr) {
       break;
     }
   }
@@ -155,32 +155,56 @@ Http2DependencyTree<T>::_find(Node *node, uint32_t id, uint32_t depth)
 }
 
 template <typename T>
-typename Http2DependencyTree<T>::Node *
-Http2DependencyTree<T>::find(uint32_t id)
+Node *
+Tree<T>::find_shadow(uint32_t id)
 {
   return _find(_root, id);
 }
 
 template <typename T>
-typename Http2DependencyTree<T>::Node *
-Http2DependencyTree<T>::add(uint32_t parent_id, uint32_t id, uint32_t weight, bool exclusive, T t)
+Node *
+Tree<T>::find(uint32_t id)
+{
+  Node *n = _find(_root, id);
+  return n == nullptr ? nullptr : (n->is_shadow() ? nullptr : n);
+}
+
+template <typename T>
+Node *
+Tree<T>::add(uint32_t parent_id, uint32_t id, uint32_t weight, bool exclusive, T t)
 {
   Node *parent = find(parent_id);
-  if (parent == NULL) {
-    parent = _root;
+  if (parent == nullptr) {
+    parent = add(0, parent_id, HTTP2_PRIORITY_DEFAULT_WEIGHT, false, nullptr);
+  }
+
+  Node *node = find_shadow(id);
+  if (node != nullptr && node->is_shadow()) {
+    node->t      = t;
+    node->point  = id;
+    node->weight = weight;
+    return node;
   }
 
   // Use stream id as initial point
-  Node *node = new Node(id, weight, id, parent, t);
+  node = new Node(id, weight, id, parent, t);
 
   if (exclusive) {
     while (Node *child = parent->children.pop()) {
+      if (child->queued) {
+        parent->queue->erase(child->entry);
+        node->queue->push(child->entry);
+      }
       node->children.push(child);
       child->parent = node;
     }
   }
 
   parent->children.push(node);
+  if (!node->queue->empty()) {
+    parent->queue->push(node->entry);
+    node->queued = true;
+  }
 
   ++_node_count;
   return node;
@@ -188,7 +212,7 @@ Http2DependencyTree<T>::add(uint32_t parent_id, uint32_t id, uint32_t weight, bo
 
 template <typename T>
 void
-Http2DependencyTree<T>::remove(Node *node)
+Tree<T>::remove(Node *node)
 {
   if (node == _root || node->active) {
     return;
@@ -213,16 +237,21 @@ Http2DependencyTree<T>::remove(Node *node)
     child->parent = parent;
   }
 
+  // delete the shadow parent
+  if (parent->is_shadow() && parent->children.empty() && parent->queue->empty()) {
+    remove(parent);
+  }
+
   --_node_count;
   delete node;
 }
 
 template <typename T>
 void
-Http2DependencyTree<T>::reprioritize(uint32_t id, uint32_t new_parent_id, bool exclusive)
+Tree<T>::reprioritize(uint32_t id, uint32_t new_parent_id, bool exclusive)
 {
   Node *node = find(id);
-  if (node == NULL) {
+  if (node == nullptr) {
     return;
   }
 
@@ -231,9 +260,9 @@ Http2DependencyTree<T>::reprioritize(uint32_t id, uint32_t new_parent_id, bool e
 
 template <typename T>
 void
-Http2DependencyTree<T>::reprioritize(Node *node, uint32_t new_parent_id, bool exclusive)
+Tree<T>::reprioritize(Node *node, uint32_t new_parent_id, bool exclusive)
 {
-  if (node == NULL) {
+  if (node == nullptr) {
     return;
   }
 
@@ -242,25 +271,49 @@ Http2DependencyTree<T>::reprioritize(Node *node, uint32_t new_parent_id, bool ex
     // Do nothing
     return;
   }
+  // should not change the root node
+  ink_assert(node->parent);
 
   Node *new_parent = find(new_parent_id);
-  if (new_parent == NULL) {
+  if (new_parent == nullptr) {
     return;
   }
   _change_parent(new_parent, old_parent, false);
   _change_parent(node, new_parent, exclusive);
+
+  // delete the shadow node
+  if (node->is_shadow() && node->children.empty() && node->queue->empty()) {
+    remove(node);
+  }
 }
 
 // Change node's parent to new_parent
 template <typename T>
 void
-Http2DependencyTree<T>::_change_parent(Node *node, Node *new_parent, bool exclusive)
+Tree<T>::_change_parent(Node *node, Node *new_parent, bool exclusive)
 {
+  ink_release_assert(node->parent != nullptr);
   node->parent->children.remove(node);
-  node->parent = NULL;
+  if (node->queued) {
+    node->parent->queue->erase(node->entry);
+    node->queued = false;
+
+    Node *current = node->parent;
+    while (current->queue->empty() && !current->active && current->parent != nullptr) {
+      current->parent->queue->erase(current->entry);
+      current->queued = false;
+      current         = current->parent;
+    }
+  }
 
+  node->parent = nullptr;
   if (exclusive) {
     while (Node *child = new_parent->children.pop()) {
+      if (child->queued) {
+        child->parent->queue->erase(child->entry);
+        node->queue->push(child->entry);
+      }
+
       node->children.push(child);
       child->parent = node;
     }
@@ -268,21 +321,30 @@ Http2DependencyTree<T>::_change_parent(Node *node, Node *new_parent, bool exclus
 
   new_parent->children.push(node);
   node->parent = new_parent;
+
+  if (node->active || !node->queue->empty()) {
+    Node *current = node;
+    while (current->parent != nullptr && !current->queued) {
+      current->parent->queue->push(current->entry);
+      current->queued = true;
+      current         = current->parent;
+    }
+  }
 }
 
 template <typename T>
-typename Http2DependencyTree<T>::Node *
-Http2DependencyTree<T>::_top(Node *node)
+Node *
+Tree<T>::_top(Node *node)
 {
   Node *child = node;
 
-  while (child != NULL) {
+  while (child != nullptr) {
     if (child->active) {
       return child;
     } else if (!child->queue->empty()) {
       child = child->queue->top()->node;
     } else {
-      return NULL;
+      return nullptr;
     }
   }
 
@@ -290,19 +352,19 @@ Http2DependencyTree<T>::_top(Node *node)
 }
 
 template <typename T>
-typename Http2DependencyTree<T>::Node *
-Http2DependencyTree<T>::top()
+Node *
+Tree<T>::top()
 {
   return _top(_root);
 }
 
 template <typename T>
 void
-Http2DependencyTree<T>::activate(Node *node)
+Tree<T>::activate(Node *node)
 {
   node->active = true;
 
-  while (node->parent != NULL && !node->queued) {
+  while (node->parent != nullptr && !node->queued) {
     node->parent->queue->push(node->entry);
     node->queued = true;
     node         = node->parent;
@@ -311,12 +373,12 @@ Http2DependencyTree<T>::activate(Node *node)
 
 template <typename T>
 void
-Http2DependencyTree<T>::deactivate(Node *node, uint32_t sent)
+Tree<T>::deactivate(Node *node, uint32_t sent)
 {
   node->active = false;
 
-  while (node->queue->empty() && node->parent != NULL) {
-    node->parent->queue->pop();
+  while (node->queue->empty() && node->parent != nullptr) {
+    node->parent->queue->erase(node->entry);
     node->queued = false;
 
     node = node->parent;
@@ -327,9 +389,9 @@ Http2DependencyTree<T>::deactivate(Node *node, uint32_t sent)
 
 template <typename T>
 void
-Http2DependencyTree<T>::update(Node *node, uint32_t sent)
+Tree<T>::update(Node *node, uint32_t sent)
 {
-  while (node->parent != NULL) {
+  while (node->parent != nullptr) {
     node->point += sent * K / (node->weight + 1);
 
     if (node->queued) {
@@ -345,9 +407,9 @@ Http2DependencyTree<T>::update(Node *node, uint32_t sent)
 
 template <typename T>
 uint32_t
-Http2DependencyTree<T>::size() const
+Tree<T>::size() const
 {
   return _node_count;
 }
-
+} // namespace Http2DependencyTree
 #endif // __HTTP2_DEP_TREE_H__
diff --git a/proxy/http2/Http2Stream.cc b/proxy/http2/Http2Stream.cc
index 24c9f3d..c5aeac1 100644
--- a/proxy/http2/Http2Stream.cc
+++ b/proxy/http2/Http2Stream.cc
@@ -619,10 +619,10 @@ Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len,
 }
 
 void
-Http2Stream::push_promise(URL &url)
+Http2Stream::push_promise(URL &url, const MIMEField *accept_encoding)
 {
   Http2ClientSession *parent = static_cast<Http2ClientSession *>(this->get_parent());
-  parent->connection_state.send_push_promise_frame(this, url);
+  parent->connection_state.send_push_promise_frame(this, url, accept_encoding);
 }
 
 void
diff --git a/proxy/http2/Http2Stream.h b/proxy/http2/Http2Stream.h
index d9828cc..fbdef7a 100644
--- a/proxy/http2/Http2Stream.h
+++ b/proxy/http2/Http2Stream.h
@@ -33,7 +33,7 @@
 class Http2Stream;
 class Http2ConnectionState;
 
-typedef Http2DependencyTree<Http2Stream *> DependencyTree;
+typedef Http2DependencyTree::Tree<Http2Stream *> DependencyTree;
 
 class Http2Stream : public ProxyClientTransaction
 {
@@ -89,6 +89,12 @@ public:
     return _id;
   }
 
+  int
+  get_transaction_id() const override
+  {
+    return _id;
+  }
+
   Http2StreamState
   get_state() const
   {
@@ -147,7 +153,7 @@ public:
   void reenable(VIO *vio) override;
   virtual void transaction_done() override;
   void send_response_body();
-  void push_promise(URL &url);
+  void push_promise(URL &url, const MIMEField *accept_encoding);
 
   // Stream level window size
   ssize_t client_rwnd;
@@ -167,10 +173,10 @@ public:
   bool is_first_transaction_flag = false;
 
   HTTPHdr response_header;
-  IOBufferReader *response_reader     = nullptr;
-  IOBufferReader *request_reader      = nullptr;
-  MIOBuffer request_buffer            = CLIENT_CONNECTION_FIRST_READ_BUFFER_SIZE_INDEX;
-  DependencyTree::Node *priority_node = nullptr;
+  IOBufferReader *response_reader          = nullptr;
+  IOBufferReader *request_reader           = nullptr;
+  MIOBuffer request_buffer                 = CLIENT_CONNECTION_FIRST_READ_BUFFER_SIZE_INDEX;
+  Http2DependencyTree::Node *priority_node = nullptr;
 
   EThread *
   get_thread()
diff --git a/proxy/http2/Makefile.am b/proxy/http2/Makefile.am
index 848b71c..3124e46 100644
--- a/proxy/http2/Makefile.am
+++ b/proxy/http2/Makefile.am
@@ -89,7 +89,7 @@ test_HPACK_LDADD = \
   $(top_builddir)/mgmt/libmgmt_p.la \
   $(top_builddir)/proxy/shared/libUglyLogStubs.a \
   @LIBTCL@ \
-  @HWLOC_LIBS@
+  @HWLOC_LIBS@ @LIBCAP@
 
 test_HPACK_SOURCES = \
   test_HPACK.cc \
diff --git a/proxy/http2/test_Http2DependencyTree.cc b/proxy/http2/test_Http2DependencyTree.cc
index 5056cbd..20ee49c 100644
--- a/proxy/http2/test_Http2DependencyTree.cc
+++ b/proxy/http2/test_Http2DependencyTree.cc
@@ -31,7 +31,8 @@
 
 using namespace std;
 
-using Tree = Http2DependencyTree<std::string *>;
+using Tree = Http2DependencyTree::Tree<std::string *>;
+using Node = Http2DependencyTree::Node;
 
 /**
  * Exclusive Dependency Creation
@@ -53,9 +54,9 @@ REGRESSION_TEST(Http2DependencyTree_1)(RegressionTest *t, int /* atype ATS_UNUSE
   tree->add(0, 1, 0, false, &b);
   tree->add(0, 3, 0, false, &c);
 
-  Tree::Node *node_a = tree->find(0);
-  Tree::Node *node_b = tree->find(1);
-  Tree::Node *node_c = tree->find(3);
+  Node *node_a = tree->find(0);
+  Node *node_b = tree->find(1);
+  Node *node_c = tree->find(3);
 
   box.check(node_b->parent == node_a, "parent of B should be A");
   box.check(node_c->parent == node_a, "parent of C should be A");
@@ -63,7 +64,7 @@ REGRESSION_TEST(Http2DependencyTree_1)(RegressionTest *t, int /* atype ATS_UNUSE
   // Add node with exclusive flag
   tree->add(0, 5, 0, true, &d);
 
-  Tree::Node *node_d = tree->find(5);
+  Node *node_d = tree->find(5);
 
   box.check(node_d->parent == node_a, "parent of D should be A");
   box.check(node_b->parent == node_d, "parent of B should be D");
@@ -102,10 +103,10 @@ REGRESSION_TEST(Http2DependencyTree_2)(RegressionTest *t, int /* atype ATS_UNUSE
 
   tree->reprioritize(1, 7, false);
 
-  Tree::Node *node_x = tree->find(0);
-  Tree::Node *node_a = tree->find(1);
-  Tree::Node *node_d = tree->find(7);
-  Tree::Node *node_f = tree->find(11);
+  Node *node_x = tree->find(0);
+  Node *node_a = tree->find(1);
+  Node *node_d = tree->find(7);
+  Node *node_f = tree->find(11);
 
   box.check(node_a->parent == node_d, "parent of A should be D");
   box.check(node_d->parent == node_x, "parent of D should be X");
@@ -144,10 +145,10 @@ REGRESSION_TEST(Http2DependencyTree_3)(RegressionTest *t, int /* atype ATS_UNUSE
 
   tree->reprioritize(1, 7, true);
 
-  Tree::Node *node_x = tree->find(0);
-  Tree::Node *node_a = tree->find(1);
-  Tree::Node *node_d = tree->find(7);
-  Tree::Node *node_f = tree->find(11);
+  Node *node_x = tree->find(0);
+  Node *node_a = tree->find(1);
+  Node *node_d = tree->find(7);
+  Node *node_f = tree->find(11);
 
   box.check(node_a->parent == node_d, "parent of A should be D");
   box.check(node_d->parent == node_x, "parent of D should be X");
@@ -171,15 +172,15 @@ REGRESSION_TEST(Http2DependencyTree_4)(RegressionTest *t, int /* atype ATS_UNUSE
   string a("A");
   tree->add(0, 1, 0, false, &a);
 
-  Tree::Node *node_a = tree->find(1);
+  Node *node_a = tree->find(1);
 
-  box.check(tree->top() == nullptr, "top should be NULL");
+  box.check(tree->top() == nullptr, "top should be nullptr");
 
   tree->activate(node_a);
   box.check(tree->top() == node_a, "top should be A");
 
   tree->deactivate(node_a, 0);
-  box.check(tree->top() == nullptr, "top should be NULL");
+  box.check(tree->top() == nullptr, "top should be nullptr");
 
   delete tree;
 }
@@ -204,10 +205,10 @@ REGRESSION_TEST(Http2DependencyTree_5)(RegressionTest *t, int /* atype ATS_UNUSE
   tree->add(0, 3, 15, false, &a);
   tree->add(3, 5, 15, false, &b);
 
-  Tree::Node *node_a = tree->find(3);
-  Tree::Node *node_b = tree->find(5);
+  Node *node_a = tree->find(3);
+  Node *node_b = tree->find(5);
 
-  box.check(tree->top() == nullptr, "top should be NULL");
+  box.check(tree->top() == nullptr, "top should be nullptr");
 
   tree->activate(node_a);
   tree->activate(node_b);
@@ -239,9 +240,9 @@ REGRESSION_TEST(Http2DependencyTree_6)(RegressionTest *t, int /* atype ATS_UNUSE
 
   // NOTE, weight is actual weight - 1
   tree->add(0, 3, 20, false, &a); // node_a is unused
-  Tree::Node *node_b = tree->add(3, 5, 10, false, &b);
-  Tree::Node *node_c = tree->add(3, 7, 10, false, &c);
-  Tree::Node *node_d = tree->add(0, 9, 20, false, &d);
+  Node *node_b = tree->add(3, 5, 10, false, &b);
+  Node *node_c = tree->add(3, 7, 10, false, &c);
+  Node *node_d = tree->add(0, 9, 20, false, &d);
 
   // Activate B, C and D
   tree->activate(node_b);
@@ -251,8 +252,8 @@ REGRESSION_TEST(Http2DependencyTree_6)(RegressionTest *t, int /* atype ATS_UNUSE
   ostringstream oss;
 
   for (int i = 0; i < 90; ++i) {
-    Tree::Node *node = tree->top();
-    oss << node->t->c_str();
+    Node *node = tree->top();
+    oss << static_cast<string *>(node->t)->c_str();
     tree->update(node, 100);
   }
 
@@ -279,15 +280,15 @@ REGRESSION_TEST(Http2DependencyTree_Chrome_50)(RegressionTest *t, int /* atype A
 
   string a("A"), b("B"), c("C"), d("D"), e("E"), f("F"), g("G"), h("H"), i("I");
 
-  Tree::Node *node_a = tree->add(0, 3, 255, false, &a);
-  Tree::Node *node_b = tree->add(0, 5, 255, false, &b);
-  Tree::Node *node_c = tree->add(0, 7, 255, false, &c);
-  Tree::Node *node_d = tree->add(0, 9, 182, false, &d);
-  Tree::Node *node_e = tree->add(0, 11, 182, false, &e);
-  Tree::Node *node_f = tree->add(0, 13, 182, false, &f);
-  Tree::Node *node_g = tree->add(0, 15, 146, false, &g);
-  Tree::Node *node_h = tree->add(0, 17, 146, false, &h);
-  Tree::Node *node_i = tree->add(0, 19, 146, false, &i);
+  Node *node_a = tree->add(0, 3, 255, false, &a);
+  Node *node_b = tree->add(0, 5, 255, false, &b);
+  Node *node_c = tree->add(0, 7, 255, false, &c);
+  Node *node_d = tree->add(0, 9, 182, false, &d);
+  Node *node_e = tree->add(0, 11, 182, false, &e);
+  Node *node_f = tree->add(0, 13, 182, false, &f);
+  Node *node_g = tree->add(0, 15, 146, false, &g);
+  Node *node_h = tree->add(0, 17, 146, false, &h);
+  Node *node_i = tree->add(0, 19, 146, false, &i);
 
   // Activate nodes from A to I
   tree->activate(node_a);
@@ -303,8 +304,8 @@ REGRESSION_TEST(Http2DependencyTree_Chrome_50)(RegressionTest *t, int /* atype A
   ostringstream oss;
 
   for (int i = 0; i < 108; ++i) {
-    Tree::Node *node = tree->top();
-    oss << node->t->c_str();
+    Node *node = tree->top();
+    oss << static_cast<string *>(node->t)->c_str();
 
     tree->update(node, 16375);
   }
@@ -340,15 +341,15 @@ REGRESSION_TEST(Http2DependencyTree_Chrome_51)(RegressionTest *t, int /* atype A
 
   string a("A"), b("B"), c("C"), d("D"), e("E"), f("F"), g("G"), h("H"), i("I");
 
-  Tree::Node *node_a = tree->add(0, 3, 255, false, &a);
-  Tree::Node *node_b = tree->add(3, 5, 255, false, &b);
-  Tree::Node *node_c = tree->add(5, 7, 255, false, &c);
-  Tree::Node *node_d = tree->add(7, 9, 182, false, &d);
-  Tree::Node *node_e = tree->add(9, 11, 182, false, &e);
-  Tree::Node *node_f = tree->add(11, 13, 182, false, &f);
-  Tree::Node *node_g = tree->add(13, 15, 146, false, &g);
-  Tree::Node *node_h = tree->add(15, 17, 146, false, &h);
-  Tree::Node *node_i = tree->add(17, 19, 146, false, &i);
+  Node *node_a = tree->add(0, 3, 255, false, &a);
+  Node *node_b = tree->add(3, 5, 255, false, &b);
+  Node *node_c = tree->add(5, 7, 255, false, &c);
+  Node *node_d = tree->add(7, 9, 182, false, &d);
+  Node *node_e = tree->add(9, 11, 182, false, &e);
+  Node *node_f = tree->add(11, 13, 182, false, &f);
+  Node *node_g = tree->add(13, 15, 146, false, &g);
+  Node *node_h = tree->add(15, 17, 146, false, &h);
+  Node *node_i = tree->add(17, 19, 146, false, &i);
 
   // Activate nodes A, C, E, G, and I
   tree->activate(node_a);
@@ -360,9 +361,9 @@ REGRESSION_TEST(Http2DependencyTree_Chrome_51)(RegressionTest *t, int /* atype A
   ostringstream oss;
 
   for (int i = 0; i < 9; ++i) {
-    Tree::Node *node = tree->top();
+    Node *node = tree->top();
     if (node != nullptr) {
-      oss << node->t->c_str();
+      oss << static_cast<string *>(node->t)->c_str();
 
       tree->deactivate(node, 16384);
       tree->remove(node);
@@ -376,9 +377,9 @@ REGRESSION_TEST(Http2DependencyTree_Chrome_51)(RegressionTest *t, int /* atype A
   tree->activate(node_h);
 
   for (int i = 0; i < 9; ++i) {
-    Tree::Node *node = tree->top();
+    Node *node = tree->top();
     if (node != nullptr) {
-      oss << node->t->c_str();
+      oss << static_cast<string *>(node->t)->c_str();
 
       tree->deactivate(node, 16384);
       tree->remove(node);
@@ -412,16 +413,16 @@ REGRESSION_TEST(Http2DependencyTree_remove_1)(RegressionTest *t, int /* atype AT
   string a("A"), b("B"), c("C");
 
   // NOTE, weight is actual weight - 1
-  Tree::Node *node_a = tree->add(0, 3, 30, false, &a);
-  Tree::Node *node_b = tree->add(3, 5, 20, false, &b);
-  Tree::Node *node_c = tree->add(3, 7, 10, false, &c);
+  Node *node_a = tree->add(0, 3, 30, false, &a);
+  Node *node_b = tree->add(3, 5, 20, false, &b);
+  Node *node_c = tree->add(3, 7, 10, false, &c);
 
   // Activate A, B, and C
   tree->activate(node_a);
   tree->activate(node_b);
   tree->activate(node_c);
 
-  Tree::Node *top_node = nullptr;
+  Node *top_node = nullptr;
 
   // Deactivate A and try to remove
   top_node = tree->top();
@@ -468,9 +469,9 @@ REGRESSION_TEST(Http2DependencyTree_remove_2)(RegressionTest *t, int /* atype AT
   string a("A"), b("B"), c("C");
 
   // NOTE, weight is actual weight - 1
-  Tree::Node *node_a = tree->add(0, 3, 20, false, &a);
-  Tree::Node *node_b = tree->add(3, 5, 10, false, &b);
-  Tree::Node *node_c = tree->add(5, 7, 10, false, &c);
+  Node *node_a = tree->add(0, 3, 20, false, &a);
+  Node *node_b = tree->add(3, 5, 10, false, &b);
+  Node *node_c = tree->add(5, 7, 10, false, &c);
 
   // Activate, deactivate, and remove C
   tree->activate(node_c);
@@ -490,7 +491,7 @@ REGRESSION_TEST(Http2DependencyTree_remove_2)(RegressionTest *t, int /* atype AT
   tree->deactivate(node_b, 16384);
   tree->remove(node_b);
 
-  box.check(tree->top() == nullptr, "Top node should be NULL");
+  box.check(tree->top() == nullptr, "Top node should be nullptr");
   box.check(tree->find(3) == nullptr, "Tree should be empty");
   box.check(tree->find(5) == nullptr, "Tree should be empty");
   box.check(tree->find(7) == nullptr, "Tree should be empty");
@@ -498,6 +499,256 @@ REGRESSION_TEST(Http2DependencyTree_remove_2)(RegressionTest *t, int /* atype AT
   delete tree;
 }
 
+/**
+ * Exclusive Dependency Creation
+ *
+ *       A            A
+ *      / \    =>     |
+ *     B   C          D
+ *                   / \
+ *                  B   C
+ */
+REGRESSION_TEST(Http2DependencyTree_exclusive_node)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+  string a("A"), b("B"), c("C"), d("D");
+
+  Node *B = tree->add(0, 1, 0, false, &b);
+  tree->add(0, 3, 0, false, &c);
+
+  tree->activate(B);
+  // Add node with exclusive flag
+  tree->add(0, 5, 0, true, &d);
+
+  tree->deactivate(B, 0);
+  tree->remove(B);
+
+  box.check(tree->top() == nullptr, "Tree top should be nullptr");
+
+  delete tree;
+}
+
+/** Test for reprioritizing an active node
+*
+*     root                  root                   root
+*    /    \                /    \   (remove A)    /    \
+*   A      B   =======>   C      B   =======>    C      B
+*           \            /
+*            C          A
+*
+*/
+REGRESSION_TEST(Http2DependencyTree_reprioritize)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+  string a("A"), b("B"), c("C");
+
+  Node *A = tree->add(0, 7, 70, false, &a);
+  Node *B = tree->add(0, 3, 10, false, &b);
+  Node *C = tree->add(3, 5, 30, false, &c);
+
+  tree->activate(A);
+  tree->activate(B);
+  tree->activate(C);
+
+  tree->reprioritize(A, 5, false);
+
+  tree->deactivate(A, 0);
+  tree->remove(A);
+
+  box.check(tree->top()->t != nullptr, "should not core dump");
+
+  delete tree;
+}
+
+/**
+ * Reprioritization (exclusive)
+ *
+ *    x              x
+ *    |              |
+ *    A              D
+ *   / \             |
+ *  B   C     ==>    A
+ *     / \          /|\
+ *    D   E        B C F
+ *    |              |
+ *    F              E
+ */
+REGRESSION_TEST(Http2DependencyTree_reprioritize_2)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+  string a("A"), b("B"), c("C"), d("D"), e("E"), f("F");
+
+  tree->add(0, 1, 0, false, &a);
+  tree->add(1, 3, 0, false, &b);
+  tree->add(1, 5, 0, false, &c);
+  tree->add(5, 7, 0, false, &d);
+  tree->add(5, 9, 0, false, &e);
+  tree->add(7, 11, 0, false, &f);
+
+  Node *node_x = tree->find(0);
+  Node *node_a = tree->find(1);
+  Node *node_b = tree->find(3);
+  Node *node_d = tree->find(7);
+
+  tree->activate(node_b);
+  box.check(node_x->queue->in(node_a->entry), "A should be in x's queue");
+
+  tree->reprioritize(1, 7, true);
+
+  box.check(!node_x->queue->in(node_a->entry), "A should not be in x's queue");
+  box.check(node_x->queue->in(node_d->entry), "D should be in x's queue");
+  box.check(node_d->queue->in(node_a->entry), "A should be in d's queue");
+
+  delete tree;
+}
+
+/**
+ * Reprioritization (exclusive)
+ *
+ *    x              x
+ *    |              |
+ *    A              D
+ *   / \             |
+ *  B   C     ==>    A
+ *     / \          /|\
+ *    D   E        B C F
+ *    |              |
+ *    F              E
+ */
+REGRESSION_TEST(Http2DependencyTree_reprioritize_3)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+  string a("A"), b("B"), c("C"), d("D"), e("E"), f("F");
+
+  tree->add(0, 1, 0, false, &a);
+  tree->add(1, 3, 0, false, &b);
+  tree->add(1, 5, 0, false, &c);
+  tree->add(5, 7, 0, false, &d);
+  tree->add(5, 9, 0, false, &e);
+  tree->add(7, 11, 0, false, &f);
+
+  Node *node_x = tree->find(0);
+  Node *node_a = tree->find(1);
+  Node *node_c = tree->find(5);
+  Node *node_d = tree->find(7);
+  Node *node_f = tree->find(11);
+
+  tree->activate(node_f);
+  tree->reprioritize(1, 7, true);
+
+  box.check(node_a->queue->in(node_f->entry), "F should be in A's queue");
+  box.check(node_d->queue->in(node_a->entry), "A should be in D's queue");
+  box.check(node_x->queue->in(node_d->entry), "D should be in x's queue");
+  box.check(!node_a->queue->in(node_c->entry), "C should not be in A's queue");
+  box.check(node_c->queue->empty(), "C's queue should be empty");
+
+  delete tree;
+}
+
+/** test for https://github.com/apache/trafficserver/issues/2268
+*
+*    root            root                  root
+*    /     =====>   /    \     =======>   /    \
+*   A              A      shadow         A      shadow
+*                          \                    \
+*                           B                    B
+*                                                 \
+*                                                  C
+*
+*              root                      root
+*             /    \                    /
+*  ======>   A      shadow   =======>  A
+*                    \
+*                     C
+*/
+REGRESSION_TEST(Http2DependencyTree_insert_with_empty_parent)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+
+  string a("A"), b("B"), c("C");
+  tree->add(0, 3, 20, false, &a);
+  Node *b_n = tree->add(5, 7, 30, true, &b);
+
+  box.check(b_n->parent->id == 5, "Node B's parent should be 5");
+  box.check(tree->find(5) == nullptr, "The shadow node should not be found by find()");
+  box.check(tree->find_shadow(5)->is_shadow() == true, "Node 5 should be a shadow node");
+
+  Node *c_n = tree->add(7, 9, 30, false, &c);
+  tree->remove(b_n);
+
+  box.check(c_n->parent->id == 5, "Node C's parent should be 5");
+  box.check(tree->find(7) == nullptr, "Node B should be removed");
+  box.check(tree->find_shadow(5)->is_shadow() == true, "Node 5 should still exist as a shadow node after removal");
+
+  tree->remove(c_n);
+  box.check(tree->find_shadow(5) == nullptr, "The shadow node should be removed");
+
+  delete tree;
+}
+
+/** test for https://github.com/apache/trafficserver/issues/2268
+*
+*    root            root                  root                root
+*    /     =====>   /    \     =======>   /    \   =======>   /    \
+*   A              A      shadow         A      B            A      B
+*                          \                     \
+*                           B                     shadow
+*/
+REGRESSION_TEST(Http2DependencyTree_shadow_reprioritize)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+
+  string a("A"), b("B");
+  tree->add(0, 3, 20, false, &a);
+  tree->add(5, 7, 30, true, &b);
+
+  Node *s_n = tree->find_shadow(5);
+  box.check(s_n != nullptr && s_n->is_shadow() == true, "The shadow node should not be nullptr");
+
+  tree->reprioritize(s_n, 7, false);
+  box.check(tree->find_shadow(5) == nullptr, "The shadow node should be nullptr after reprioritizing");
+
+  delete tree;
+}
+
+REGRESSION_TEST(Http2DependencyTree_shadow_change)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
+{
+  TestBox box(t, pstatus);
+  box = REGRESSION_TEST_PASSED;
+
+  Tree *tree = new Tree(100);
+  string a("A"), b("B"), c("C");
+
+  tree->add(0, 3, 20, false, &a);
+  tree->add(5, 7, 30, true, &b);
+
+  tree->add(0, 5, 15, false, &c);
+
+  Node *c_n = tree->find(5);
+  box.check(c_n != nullptr && c_n->is_shadow() == false, "Node 5 should not be shadow node");
+  box.check(c_n->point == 5 && c_n->weight == 15, "The point should be 5 and the weight should be 15");
+
+  delete tree;
+}
+
 REGRESSION_TEST(Http2DependencyTree_max_depth)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
 {
   TestBox box(t, pstatus);
@@ -510,8 +761,8 @@ REGRESSION_TEST(Http2DependencyTree_max_depth)(RegressionTest *t, int /* atype A
     tree->add(i, i + 1, 16, false, &a);
   }
 
-  Tree::Node *node = tree->find(101);
-  box.check(node->parent->id == 0, "101st node should be child of root node");
+  Node *node = tree->find(101);
+  box.check(node->parent->parent->id == 0, "101st node should be child of root's child node");
 
   delete tree;
 }
diff --git a/proxy/logcat.cc b/proxy/logcat.cc
index 590804e..98f8c82 100644
--- a/proxy/logcat.cc
+++ b/proxy/logcat.cc
@@ -24,6 +24,7 @@
 #include "ts/ink_platform.h"
 #include "ts/ink_args.h"
 #include "ts/I_Layout.h"
+#include "ts/runroot.cc"
 
 #define PROGRAM_NAME "traffic_logcat"
 #define MAX_LOGBUFFER_SIZE 65536
@@ -68,7 +69,8 @@ static const ArgumentDescription argument_descriptions[] = {
   {"overwrite_output", 'w', "Overwrite existing output file(s)", "T", &overwrite_existing_file, NULL, NULL},
   {"elf2", '2', "Convert to Extended2 Logging Format", "T", &elf2_flag, NULL, NULL},
   HELP_ARGUMENT_DESCRIPTION(),
-  VERSION_ARGUMENT_DESCRIPTION()};
+  VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION()};
 
 /*
  * Gets the inode number of a given file
@@ -256,6 +258,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[])
   //
   appVersionInfo.setup(PACKAGE_NAME, PROGRAM_NAME, PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, "");
 
+  runroot_handler(argv);
   // Before accessing file system initialize Layout engine
   Layout::create();
   // process command-line arguments
diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc
index a4b0fea..0a2fe4d 100644
--- a/proxy/logging/Log.cc
+++ b/proxy/logging/Log.cc
@@ -388,23 +388,23 @@ Log::init_fields()
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "cqth", field);
 
-  field = new LogField("client_req_timestamp_squid", "cqtq", LogField::STRING, &LogAccess::marshal_client_req_timestamp_squid,
-                       (LogField::UnmarshalFunc)&LogAccess::unmarshal_str);
+  field = new LogField("client_req_timestamp_squid", "cqtq", LogField::sINT, &LogAccess::marshal_client_req_timestamp_ms,
+                       &LogAccess::unmarshal_ttmsf);
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "cqtq", field);
 
-  field = new LogField("client_req_timestamp_netscape", "cqtn", LogField::STRING, &LogAccess::marshal_client_req_timestamp_netscape,
-                       (LogField::UnmarshalFunc)&LogAccess::unmarshal_str);
+  field = new LogField("client_req_timestamp_netscape", "cqtn", LogField::sINT, &LogAccess::marshal_client_req_timestamp_sec,
+                       &LogAccess::unmarshal_int_to_netscape_str);
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "cqtn", field);
 
-  field = new LogField("client_req_timestamp_date", "cqtd", LogField::STRING, &LogAccess::marshal_client_req_timestamp_date,
-                       (LogField::UnmarshalFunc)&LogAccess::unmarshal_str);
+  field = new LogField("client_req_timestamp_date", "cqtd", LogField::sINT, &LogAccess::marshal_client_req_timestamp_sec,
+                       &LogAccess::unmarshal_int_to_date_str);
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "cqtd", field);
 
-  field = new LogField("client_req_timestamp_time", "cqtt", LogField::STRING, &LogAccess::marshal_client_req_timestamp_time,
-                       (LogField::UnmarshalFunc)&LogAccess::unmarshal_str);
+  field = new LogField("client_req_timestamp_time", "cqtt", LogField::sINT, &LogAccess::marshal_client_req_timestamp_sec,
+                       &LogAccess::unmarshal_int_to_time_str);
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "cqtt", field);
 
@@ -838,6 +838,16 @@ Log::init_fields()
   global_field_list.add(field, false);
   ink_hash_table_insert(field_symbol_hash, "fsiz", field);
 
+  field = new LogField("client_connection_id", "ccid", LogField::sINT, &LogAccess::marshal_client_http_connection_id,
+                       &LogAccess::unmarshal_int_to_str);
+  global_field_list.add(field, false);
+  ink_hash_table_insert(field_symbol_hash, "ccid", field);
+
+  field = new LogField("client_transaction_id", "ctid", LogField::sINT, &LogAccess::marshal_client_http_transaction_id,
+                       &LogAccess::unmarshal_int_to_str);
+  global_field_list.add(field, false);
+  ink_hash_table_insert(field_symbol_hash, "ctid", field);
+
   Ptr<LogFieldAliasTable> entry_type_map = make_ptr(new LogFieldAliasTable);
   entry_type_map->init(N_LOG_ENTRY_TYPES, LOG_ENTRY_HTTP, "LOG_ENTRY_HTTP");
   field = new LogField("log_entry_type", "etype", LogField::sINT, &LogAccess::marshal_entry_type, &LogAccess::unmarshal_entry_type,
diff --git a/proxy/logging/LogAccess.cc b/proxy/logging/LogAccess.cc
index eb7aad7..916db70 100644
--- a/proxy/logging/LogAccess.cc
+++ b/proxy/logging/LogAccess.cc
@@ -216,10 +216,7 @@ LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_uuid, DEFAULT_STR_FIELD)
   -------------------------------------------------------------------------*/
 
 LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_sec, DEFAULT_INT_FIELD)
-LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_squid, DEFAULT_STR_FIELD)
-LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_netscape, DEFAULT_STR_FIELD)
-LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_date, DEFAULT_STR_FIELD)
-LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_time, DEFAULT_STR_FIELD)
+LOG_ACCESS_DEFAULT_FIELD(marshal_client_req_timestamp_ms, DEFAULT_INT_FIELD)
 
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
@@ -494,6 +491,16 @@ LogAccess::marshal_process_uuid(char *buf)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
+LOG_ACCESS_DEFAULT_FIELD(marshal_client_http_connection_id, DEFAULT_INT_FIELD)
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+LOG_ACCESS_DEFAULT_FIELD(marshal_client_http_transaction_id, DEFAULT_INT_FIELD)
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
 int
 LogAccess::marshal_config_int_var(char *config_var, char *buf)
 {
@@ -1023,11 +1030,56 @@ LogAccess::unmarshal_ttmsf(char **buf, char *dest, int len)
   ink_assert(dest != nullptr);
 
   int64_t val = unmarshal_int(buf);
-  float secs  = (float)val / 1000;
+  double secs = (double)val / 1000;
   int val_len = snprintf(dest, len, "%.3f", secs);
   return val_len;
 }
 
+int
+LogAccess::unmarshal_int_to_date_str(char **buf, char *dest, int len)
+{
+  ink_assert(buf != nullptr);
+  ink_assert(*buf != nullptr);
+  ink_assert(dest != nullptr);
+
+  int64_t value = unmarshal_int(buf);
+  char *strval  = LogUtils::timestamp_to_date_str(value);
+  int strlen    = LogAccess::strlen(strval);
+
+  memcpy(dest, strval, strlen);
+  return strlen;
+}
+
+int
+LogAccess::unmarshal_int_to_time_str(char **buf, char *dest, int len)
+{
+  ink_assert(buf != nullptr);
+  ink_assert(*buf != nullptr);
+  ink_assert(dest != nullptr);
+
+  int64_t value = unmarshal_int(buf);
+  char *strval  = LogUtils::timestamp_to_time_str(value);
+  int strlen    = LogAccess::strlen(strval);
+
+  memcpy(dest, strval, strlen);
+  return strlen;
+}
+
+int
+LogAccess::unmarshal_int_to_netscape_str(char **buf, char *dest, int len)
+{
+  ink_assert(buf != nullptr);
+  ink_assert(*buf != nullptr);
+  ink_assert(dest != nullptr);
+
+  int64_t value = unmarshal_int(buf);
+  char *strval  = LogUtils::timestamp_to_netscape_str(value);
+  int strlen    = LogAccess::strlen(strval);
+
+  memcpy(dest, strval, strlen);
+  return strlen;
+}
+
 /*-------------------------------------------------------------------------
   LogAccess::unmarshal_http_method
 
diff --git a/proxy/logging/LogAccess.h b/proxy/logging/LogAccess.h
index e85522b..47d368b 100644
--- a/proxy/logging/LogAccess.h
+++ b/proxy/logging/LogAccess.h
@@ -174,10 +174,7 @@ public:
   inkcoreapi virtual int marshal_client_host_port(char *);              // INT
   inkcoreapi virtual int marshal_client_auth_user_name(char *);         // STR
   inkcoreapi virtual int marshal_client_req_timestamp_sec(char *);      // INT
-  inkcoreapi virtual int marshal_client_req_timestamp_squid(char *);    // STR
-  inkcoreapi virtual int marshal_client_req_timestamp_netscape(char *); // STR
-  inkcoreapi virtual int marshal_client_req_timestamp_date(char *);     // STR
-  inkcoreapi virtual int marshal_client_req_timestamp_time(char *);     // STR
+  inkcoreapi virtual int marshal_client_req_timestamp_ms(char *);       // INT
   inkcoreapi virtual int marshal_client_req_text(char *);               // STR
   inkcoreapi virtual int marshal_client_req_http_method(char *);        // STR
   inkcoreapi virtual int marshal_client_req_url(char *);                // STR
@@ -272,12 +269,14 @@ public:
 
   // other fields
   //
-  inkcoreapi virtual int marshal_transfer_time_ms(char *);    // INT
-  inkcoreapi virtual int marshal_transfer_time_s(char *);     // INT
-  inkcoreapi virtual int marshal_file_size(char *);           // INT
-  inkcoreapi virtual int marshal_plugin_identity_id(char *);  // INT
-  inkcoreapi virtual int marshal_plugin_identity_tag(char *); // STR
-  inkcoreapi virtual int marshal_process_uuid(char *);        // STR
+  inkcoreapi virtual int marshal_transfer_time_ms(char *);           // INT
+  inkcoreapi virtual int marshal_transfer_time_s(char *);            // INT
+  inkcoreapi virtual int marshal_file_size(char *);                  // INT
+  inkcoreapi virtual int marshal_plugin_identity_id(char *);         // INT
+  inkcoreapi virtual int marshal_plugin_identity_tag(char *);        // STR
+  inkcoreapi virtual int marshal_process_uuid(char *);               // STR
+  inkcoreapi virtual int marshal_client_http_connection_id(char *);  // INT
+  inkcoreapi virtual int marshal_client_http_transaction_id(char *); // INT
 
   // These two are special, in that they are shared for all log types / implementations
   inkcoreapi int marshal_entry_type(char *);                     // INT
@@ -327,6 +326,9 @@ public:
   static int unmarshal_int_to_str_hex(char **buf, char *dest, int len);
   static int unmarshal_str(char **buf, char *dest, int len, LogSlice *slice = NULL);
   static int unmarshal_ttmsf(char **buf, char *dest, int len);
+  static int unmarshal_int_to_date_str(char **buf, char *dest, int len);
+  static int unmarshal_int_to_time_str(char **buf, char *dest, int len);
+  static int unmarshal_int_to_netscape_str(char **buf, char *dest, int len);
   static int unmarshal_http_version(char **buf, char *dest, int len);
   static int unmarshal_http_text(char **buf, char *dest, int len, LogSlice *slice = NULL);
   static int unmarshal_http_status(char **buf, char *dest, int len);
diff --git a/proxy/logging/LogAccessHttp.cc b/proxy/logging/LogAccessHttp.cc
index cf5f781..93e15ce 100644
--- a/proxy/logging/LogAccessHttp.cc
+++ b/proxy/logging/LogAccessHttp.cc
@@ -436,33 +436,9 @@ LogAccessHttp::marshal_client_req_timestamp_sec(char *buf)
   -------------------------------------------------------------------------*/
 
 int
-LogAccessHttp::marshal_client_req_timestamp_squid(char *buf)
+LogAccessHttp::marshal_client_req_timestamp_ms(char *buf)
 {
-  return marshal_milestone_fmt_squid(TS_MILESTONE_UA_BEGIN, buf);
-}
-/*-------------------------------------------------------------------------
-  -------------------------------------------------------------------------*/
-
-int
-LogAccessHttp::marshal_client_req_timestamp_netscape(char *buf)
-{
-  return marshal_milestone_fmt_netscape(TS_MILESTONE_UA_BEGIN, buf);
-}
-/*-------------------------------------------------------------------------
-  -------------------------------------------------------------------------*/
-
-int
-LogAccessHttp::marshal_client_req_timestamp_date(char *buf)
-{
-  return marshal_milestone_fmt_date(TS_MILESTONE_UA_BEGIN, buf);
-}
-/*-------------------------------------------------------------------------
-  -------------------------------------------------------------------------*/
-
-int
-LogAccessHttp::marshal_client_req_timestamp_time(char *buf)
-{
-  return marshal_milestone_fmt_time(TS_MILESTONE_UA_BEGIN, buf);
+  return marshal_milestone_fmt_ms(TS_MILESTONE_UA_BEGIN, buf);
 }
 
 /*-------------------------------------------------------------------------
@@ -812,20 +788,18 @@ LogAccessHttp::marshal_client_req_id(char *buf)
 int
 LogAccessHttp::marshal_client_req_uuid(char *buf)
 {
-  if (buf) {
-    char str[TS_CRUUID_STRING_LEN + 1];
-    const char *uuid = (char *)Machine::instance()->uuid.getString();
-    int len;
+  char str[TS_CRUUID_STRING_LEN + 1];
+  const char *uuid = Machine::instance()->uuid.getString();
+  int len          = snprintf(str, sizeof(str), "%s-%" PRId64 "", uuid, m_http_sm->sm_id);
 
-    len = snprintf(str, sizeof(str), "%s-%" PRId64 "", uuid, m_http_sm->sm_id);
-    ink_assert(len < (int)sizeof(str));
+  ink_assert(len <= TS_CRUUID_STRING_LEN);
+  len = round_strlen(len + 1);
 
-    len = round_strlen(len + 1);
-    marshal_str(buf, str, len);
-    return len;
+  if (buf) {
+    marshal_str(buf, str, len); // This will pad the remaining bytes properly ...
   }
 
-  return round_strlen(TS_CRUUID_STRING_LEN + 1);
+  return len;
 }
 
 /*-------------------------------------------------------------------------
@@ -1494,6 +1468,47 @@ LogAccessHttp::marshal_file_size(char *buf)
   -------------------------------------------------------------------------*/
 
 int
+LogAccessHttp::marshal_client_http_connection_id(char *buf)
+{
+  if (buf) {
+    int64_t id = 0;
+    if (m_http_sm) {
+      auto p = m_http_sm->ua_session;
+      if (p) {
+        auto p2 = p->get_parent();
+        if (p2) {
+          id = p2->connection_id();
+        }
+      }
+    }
+    marshal_int(buf, id);
+  }
+  return INK_MIN_ALIGN;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+int
+LogAccessHttp::marshal_client_http_transaction_id(char *buf)
+{
+  if (buf) {
+    int64_t id = 0;
+    if (m_http_sm) {
+      auto p = m_http_sm->ua_session;
+      if (p) {
+        id = p->get_transaction_id();
+      }
+    }
+    marshal_int(buf, id);
+  }
+  return INK_MIN_ALIGN;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+int
 LogAccessHttp::marshal_http_header_field(LogField::Container container, char *field, char *buf)
 {
   char *str        = nullptr;
@@ -1649,14 +1664,15 @@ LogAccessHttp::marshal_http_header_field_escapify(LogField::Container container,
         fld = fld->m_next_dup;
 
         // Dups need to be comma separated.  So if there's another
-        // dup, then add a comma and a space ...
-        //
+        // dup, then add a comma and an escapified space ...
+        constexpr const char SEP[] = ",%20";
+        constexpr size_t SEP_LEN   = sizeof(SEP) - 1;
         if (fld != nullptr) {
           if (buf) {
-            memcpy(buf, ", ", 2);
-            buf += 2;
+            memcpy(buf, SEP, SEP_LEN);
+            buf += SEP_LEN;
           }
-          running_len += 2;
+          running_len += SEP_LEN;
         }
       }
 
@@ -1710,70 +1726,20 @@ int
 LogAccessHttp::marshal_milestone_fmt_sec(TSMilestonesType type, char *buf)
 {
   if (buf) {
-    struct timeval tp = ink_hrtime_to_timeval(m_http_sm->milestones[type]);
-    marshal_int(buf, tp.tv_sec);
+    ink_hrtime tsec = ink_hrtime_to_sec(m_http_sm->milestones[type]);
+    marshal_int(buf, tsec);
   }
   return INK_MIN_ALIGN;
 }
 
 int
-LogAccessHttp::marshal_milestone_fmt_squid(TSMilestonesType type, char *buf)
-{
-  struct timeval tp          = ink_hrtime_to_timeval(m_http_sm->milestones[type]);
-  const unsigned int val_len = 32;
-  char val[val_len]          = {0};
-
-  squid_timestamp_to_buf(val, val_len, tp.tv_sec, tp.tv_usec);
-
-  int len = LogAccess::strlen(val);
-
-  if (buf) {
-    marshal_str(buf, val, len);
-  }
-
-  return len;
-}
-
-int
-LogAccessHttp::marshal_milestone_fmt_netscape(TSMilestonesType type, char *buf)
-{
-  struct timeval tp = ink_hrtime_to_timeval(m_http_sm->milestones[type]);
-  char *val         = LogUtils::timestamp_to_netscape_str(tp.tv_sec);
-  int len           = LogAccess::strlen(val);
-
-  if (buf) {
-    marshal_str(buf, val, len);
-  }
-
-  return len;
-}
-
-int
-LogAccessHttp::marshal_milestone_fmt_date(TSMilestonesType type, char *buf)
+LogAccessHttp::marshal_milestone_fmt_ms(TSMilestonesType type, char *buf)
 {
-  struct timeval tp = ink_hrtime_to_timeval(m_http_sm->milestones[type]);
-  char *val         = LogUtils::timestamp_to_date_str(tp.tv_sec);
-  int len           = LogAccess::strlen(val);
-
   if (buf) {
-    marshal_str(buf, val, len);
+    ink_hrtime tmsec = ink_hrtime_to_msec(m_http_sm->milestones[type]);
+    marshal_int(buf, tmsec);
   }
-
-  return len;
-}
-
-int
-LogAccessHttp::marshal_milestone_fmt_time(TSMilestonesType type, char *buf)
-{
-  struct timeval tp = ink_hrtime_to_timeval(m_http_sm->milestones[type]);
-  char *val         = LogUtils::timestamp_to_time_str(tp.tv_sec);
-  int len           = LogAccess::strlen(val);
-
-  if (buf) {
-    marshal_str(buf, val, len);
-  }
-
-  return len;
+  return INK_MIN_ALIGN;
 }
 
 int
diff --git a/proxy/logging/LogAccessHttp.h b/proxy/logging/LogAccessHttp.h
index bcdb537..0c20ef2 100644
--- a/proxy/logging/LogAccessHttp.h
+++ b/proxy/logging/LogAccessHttp.h
@@ -42,12 +42,12 @@ class LogAccessHttp : public LogAccess
 {
 public:
   LogAccessHttp(HttpSM *sm);
-  virtual ~LogAccessHttp();
+  ~LogAccessHttp() override;
 
-  void init();
+  void init() override;
 
   LogEntryType
-  entry_type() const
+  entry_type() const override
   {
     return LOG_ENTRY_HTTP;
   }
@@ -55,128 +55,125 @@ public:
   //
   // client -> proxy fields
   //
-  virtual int marshal_client_host_ip(char *);                // STR
-  virtual int marshal_host_interface_ip(char *);             // STR
-  virtual int marshal_client_host_port(char *);              // INT
-  virtual int marshal_client_auth_user_name(char *);         // STR
-  virtual int marshal_client_req_text(char *);               // STR
-  virtual int marshal_client_req_http_method(char *);        // INT
-  virtual int marshal_client_req_url(char *);                // STR
-  virtual int marshal_client_req_url_canon(char *);          // STR
-  virtual int marshal_client_req_unmapped_url_canon(char *); // STR
-  virtual int marshal_client_req_unmapped_url_path(char *);  // STR
-  virtual int marshal_client_req_unmapped_url_host(char *);  // STR
-  virtual int marshal_client_req_url_path(char *);           // STR
-  virtual int marshal_client_req_url_scheme(char *);         // STR
-  virtual int marshal_client_req_http_version(char *);       // INT
-  virtual int marshal_client_req_protocol_version(char *);   // STR
-  virtual int marshal_client_req_header_len(char *);         // INT
-  virtual int marshal_client_req_content_len(char *);        // INT
-  virtual int marshal_client_req_squid_len(char *);          // INT
-  virtual int marshal_client_req_tcp_reused(char *);         // INT
-  virtual int marshal_client_req_is_ssl(char *);             // INT
-  virtual int marshal_client_req_ssl_reused(char *);         // INT
-  virtual int marshal_client_req_timestamp_sec(char *);      // INT
-  virtual int marshal_client_req_timestamp_squid(char *);    // STR
-  virtual int marshal_client_req_timestamp_netscape(char *); // STR
-  virtual int marshal_client_req_timestamp_date(char *);     // STR
-  virtual int marshal_client_req_timestamp_time(char *);     // STR
-  virtual int marshal_client_security_protocol(char *);      // STR
-  virtual int marshal_client_security_cipher_suite(char *);  // STR
-  virtual int marshal_client_finish_status_code(char *);     // INT
-  virtual int marshal_client_req_id(char *);                 // INT
-  virtual int marshal_client_req_uuid(char *);               // STR
+  int marshal_client_host_ip(char *) override;                // STR
+  int marshal_host_interface_ip(char *) override;             // STR
+  int marshal_client_host_port(char *) override;              // INT
+  int marshal_client_auth_user_name(char *) override;         // STR
+  int marshal_client_req_text(char *) override;               // STR
+  int marshal_client_req_http_method(char *) override;        // INT
+  int marshal_client_req_url(char *) override;                // STR
+  int marshal_client_req_url_canon(char *) override;          // STR
+  int marshal_client_req_unmapped_url_canon(char *) override; // STR
+  int marshal_client_req_unmapped_url_path(char *) override;  // STR
+  int marshal_client_req_unmapped_url_host(char *) override;  // STR
+  int marshal_client_req_url_path(char *) override;           // STR
+  int marshal_client_req_url_scheme(char *) override;         // STR
+  int marshal_client_req_http_version(char *) override;       // INT
+  int marshal_client_req_protocol_version(char *) override;   // STR
+  int marshal_client_req_header_len(char *) override;         // INT
+  int marshal_client_req_content_len(char *) override;        // INT
+  int marshal_client_req_squid_len(char *) override;          // INT
+  int marshal_client_req_tcp_reused(char *) override;         // INT
+  int marshal_client_req_is_ssl(char *) override;             // INT
+  int marshal_client_req_ssl_reused(char *) override;         // INT
+  int marshal_client_req_timestamp_sec(char *) override;      // INT
+  int marshal_client_req_timestamp_ms(char *) override;       // INT
+  int marshal_client_security_protocol(char *) override;      // STR
+  int marshal_client_security_cipher_suite(char *) override;  // STR
+  int marshal_client_finish_status_code(char *) override;     // INT
+  int marshal_client_req_id(char *) override;                 // INT
+  int marshal_client_req_uuid(char *) override;               // STR
 
   //
   // proxy -> client fields
   //
-  virtual int marshal_proxy_resp_content_type(char *);  // STR
-  virtual int marshal_proxy_resp_header_len(char *);    // INT
-  virtual int marshal_proxy_resp_content_len(char *);   // INT
-  virtual int marshal_proxy_resp_squid_len(char *);     // INT
-  virtual int marshal_proxy_resp_status_code(char *);   // INT
-  virtual int marshal_proxy_finish_status_code(char *); // INT
-  virtual int marshal_cache_result_code(char *);        // INT
-  virtual int marshal_cache_hit_miss(char *);           // INT
+  int marshal_proxy_resp_content_type(char *) override;  // STR
+  int marshal_proxy_resp_header_len(char *) override;    // INT
+  int marshal_proxy_resp_content_len(char *) override;   // INT
+  int marshal_proxy_resp_squid_len(char *) override;     // INT
+  int marshal_proxy_resp_status_code(char *) override;   // INT
+  int marshal_proxy_finish_status_code(char *) override; // INT
+  int marshal_cache_result_code(char *) override;        // INT
+  int marshal_cache_hit_miss(char *) override;           // INT
 
   //
   // proxy -> server fields
   //
-  virtual int marshal_proxy_req_header_len(char *);  // INT
-  virtual int marshal_proxy_req_content_len(char *); // INT
-  virtual int marshal_proxy_req_squid_len(char *);   // INT
-  virtual int marshal_proxy_req_server_name(char *); // STR
-  virtual int marshal_proxy_req_server_ip(char *);   // INT
-  virtual int marshal_proxy_req_server_port(char *); // INT
-  virtual int marshal_proxy_hierarchy_route(char *); // INT
-  virtual int marshal_proxy_host_port(char *);       // INT
-  virtual int marshal_proxy_req_is_ssl(char *);      // INT
+  int marshal_proxy_req_header_len(char *) override;  // INT
+  int marshal_proxy_req_content_len(char *) override; // INT
+  int marshal_proxy_req_squid_len(char *) override;   // INT
+  int marshal_proxy_req_server_name(char *) override; // STR
+  int marshal_proxy_req_server_ip(char *) override;   // INT
+  int marshal_proxy_req_server_port(char *) override; // INT
+  int marshal_proxy_hierarchy_route(char *) override; // INT
+  int marshal_proxy_host_port(char *) override;       // INT
+  int marshal_proxy_req_is_ssl(char *) override;      // INT
 
   //
   // server -> proxy fields
   //
-  virtual int marshal_server_host_ip(char *);           // INT
-  virtual int marshal_server_host_name(char *);         // STR
-  virtual int marshal_server_resp_status_code(char *);  // INT
-  virtual int marshal_server_resp_header_len(char *);   // INT
-  virtual int marshal_server_resp_content_len(char *);  // INT
-  virtual int marshal_server_resp_squid_len(char *);    // INT
-  virtual int marshal_server_resp_http_version(char *); // INT
-  virtual int marshal_server_resp_time_ms(char *);      // INT
-  virtual int marshal_server_resp_time_s(char *);       // INT
-  virtual int marshal_server_transact_count(char *);    // INT
-  virtual int marshal_server_connect_attempts(char *);  // INT
+  int marshal_server_host_ip(char *) override;           // INT
+  int marshal_server_host_name(char *) override;         // STR
+  int marshal_server_resp_status_code(char *) override;  // INT
+  int marshal_server_resp_header_len(char *) override;   // INT
+  int marshal_server_resp_content_len(char *) override;  // INT
+  int marshal_server_resp_squid_len(char *) override;    // INT
+  int marshal_server_resp_http_version(char *) override; // INT
+  int marshal_server_resp_time_ms(char *) override;      // INT
+  int marshal_server_resp_time_s(char *) override;       // INT
+  int marshal_server_transact_count(char *) override;    // INT
+  int marshal_server_connect_attempts(char *) override;  // INT
 
   //
   // cache -> client fields
   //
-  virtual int marshal_cache_resp_status_code(char *);  // INT
-  virtual int marshal_cache_resp_header_len(char *);   // INT
-  virtual int marshal_cache_resp_content_len(char *);  // INT
-  virtual int marshal_cache_resp_squid_len(char *);    // INT
-  virtual int marshal_cache_resp_http_version(char *); // INT
+  int marshal_cache_resp_status_code(char *) override;  // INT
+  int marshal_cache_resp_header_len(char *) override;   // INT
+  int marshal_cache_resp_content_len(char *) override;  // INT
+  int marshal_cache_resp_squid_len(char *) override;    // INT
+  int marshal_cache_resp_http_version(char *) override; // INT
 
   //
   // congestion control client_retry_after_time
   //
-  virtual int marshal_client_retry_after_time(char *); // INT
+  int marshal_client_retry_after_time(char *) override; // INT
 
   //
   // cache write fields
   //
-  virtual int marshal_cache_write_code(char *);           // INT
-  virtual int marshal_cache_write_transform_code(char *); // INT
+  int marshal_cache_write_code(char *) override;           // INT
+  int marshal_cache_write_transform_code(char *) override; // INT
 
   //
   // other fields
   //
-  virtual int marshal_transfer_time_ms(char *);       // INT
-  virtual int marshal_transfer_time_s(char *);        // INT
-  virtual int marshal_file_size(char *);              // INT
-  virtual int marshal_plugin_identity_id(char *);     // INT
-  virtual int marshal_plugin_identity_tag(char *);    // STR
-  virtual int marshal_cache_lookup_url_canon(char *); // STR
+  int marshal_transfer_time_ms(char *) override;           // INT
+  int marshal_transfer_time_s(char *) override;            // INT
+  int marshal_file_size(char *) override;                  // INT
+  int marshal_plugin_identity_id(char *) override;         // INT
+  int marshal_plugin_identity_tag(char *) override;        // STR
+  int marshal_cache_lookup_url_canon(char *) override;     // STR
+  int marshal_client_http_connection_id(char *) override;  // INT
+  int marshal_client_http_transaction_id(char *) override; // INT
 
   //
   // named fields from within a http header
   //
-  virtual int marshal_http_header_field(LogField::Container container, char *field, char *buf);
-  virtual int marshal_http_header_field_escapify(LogField::Container container, char *field, char *buf);
-
-  virtual int marshal_milestone(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_fmt_sec(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_fmt_squid(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_fmt_netscape(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_fmt_date(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_fmt_time(TSMilestonesType ms, char *buf);
-  virtual int marshal_milestone_diff(TSMilestonesType ms1, TSMilestonesType ms2, char *buf);
-
-  virtual void set_client_req_url(char *, int);                // STR
-  virtual void set_client_req_url_canon(char *, int);          // STR
-  virtual void set_client_req_unmapped_url_canon(char *, int); // STR
-  virtual void set_client_req_unmapped_url_path(char *, int);  // STR
-  virtual void set_client_req_unmapped_url_host(char *, int);  // STR
-  virtual void set_client_req_url_path(char *, int);           // STR
+  int marshal_http_header_field(LogField::Container container, char *field, char *buf) override;
+  int marshal_http_header_field_escapify(LogField::Container container, char *field, char *buf) override;
+
+  int marshal_milestone(TSMilestonesType ms, char *buf) override;
+  int marshal_milestone_fmt_sec(TSMilestonesType ms, char *buf) override;
+  int marshal_milestone_diff(TSMilestonesType ms1, TSMilestonesType ms2, char *buf) override;
+
+  int marshal_milestone_fmt_ms(TSMilestonesType ms, char *buf);
+
+  void set_client_req_url(char *, int) override;                // STR
+  void set_client_req_url_canon(char *, int) override;          // STR
+  void set_client_req_unmapped_url_canon(char *, int) override; // STR
+  void set_client_req_unmapped_url_path(char *, int) override;  // STR
+  void set_client_req_unmapped_url_host(char *, int) override;  // STR
+  void set_client_req_url_path(char *, int) override;           // STR
 
   // noncopyable
   // -- member functions that are not allowed --
diff --git a/proxy/logging/LogConfig.cc b/proxy/logging/LogConfig.cc
index e818c56..61faff1 100644
--- a/proxy/logging/LogConfig.cc
+++ b/proxy/logging/LogConfig.cc
@@ -163,7 +163,7 @@ LogConfig::read_configuration_variables()
   }
 
   ats_free(logfile_dir);
-  logfile_dir = RecConfigReadLogDir();
+  logfile_dir = ats_stringdup(RecConfigReadLogDir());
 
   if (access(logfile_dir, R_OK | W_OK | X_OK) == -1) {
     // Try 'system_root_dir/var/log/trafficserver' directory
diff --git a/proxy/logging/LogField.cc b/proxy/logging/LogField.cc
index 9e42b85..981b9e3 100644
--- a/proxy/logging/LogField.cc
+++ b/proxy/logging/LogField.cc
@@ -201,6 +201,8 @@ static const milestone milestones[] = {
   {"TS_MILESTONE_SM_FINISH", TS_MILESTONE_SM_FINISH},
   {"TS_MILESTONE_PLUGIN_ACTIVE", TS_MILESTONE_PLUGIN_ACTIVE},
   {"TS_MILESTONE_PLUGIN_TOTAL", TS_MILESTONE_PLUGIN_TOTAL},
+  {"TS_MILESTONE_TLS_HANDSHAKE_START", TS_MILESTONE_TLS_HANDSHAKE_START},
+  {"TS_MILESTONE_TLS_HANDSHAKE_END", TS_MILESTONE_TLS_HANDSHAKE_END},
 };
 
 void
diff --git a/proxy/logging/LogStandalone.cc b/proxy/logging/LogStandalone.cc
index 1102c59..d2091b3 100644
--- a/proxy/logging/LogStandalone.cc
+++ b/proxy/logging/LogStandalone.cc
@@ -148,12 +148,12 @@ check_lockfile()
   pid_t holding_pid;
   char *lockfile = nullptr;
 
-  if (access(Layout::get()->runtimedir, R_OK | W_OK) == -1) {
-    fprintf(stderr, "unable to access() dir'%s': %d, %s\n", Layout::get()->runtimedir, errno, strerror(errno));
+  if (access(Layout::get()->runtimedir.c_str(), R_OK | W_OK) == -1) {
+    fprintf(stderr, "unable to access() dir'%s': %d, %s\n", Layout::get()->runtimedir.c_str(), errno, strerror(errno));
     fprintf(stderr, " please set correct path in env variable TS_ROOT \n");
     ::exit(1);
   }
-  lockfile = Layout::relative_to(Layout::get()->runtimedir, SERVER_LOCK);
+  lockfile = ats_stringdup(Layout::relative_to(Layout::get()->runtimedir, SERVER_LOCK));
 
   Lockfile server_lockfile(lockfile);
   err = server_lockfile.Get(&holding_pid);
diff --git a/proxy/logstats.cc b/proxy/logstats.cc
index 5efc506..8007a42 100644
--- a/proxy/logstats.cc
+++ b/proxy/logstats.cc
@@ -29,6 +29,7 @@
 #include "ts/HashFNV.h"
 #include "ts/ink_args.h"
 #include "ts/MatcherUtils.h"
+#include "ts/runroot.cc"
 
 // Includes and namespaces etc.
 #include "LogStandalone.cc"
@@ -653,7 +654,8 @@ static ArgumentDescription argument_descriptions[] = {
   {"debug_tags", 'T', "Colon-Separated Debug Tags", "S1023", &error_tags, nullptr, nullptr},
   {"report_per_user", 'r', "Report stats per user instead of host", "T", &cl.report_per_user, nullptr, nullptr},
   HELP_ARGUMENT_DESCRIPTION(),
-  VERSION_ARGUMENT_DESCRIPTION()};
+  VERSION_ARGUMENT_DESCRIPTION(),
+  RUNROOT_ARGUMENT_DESCRIPTION()};
 
 static const char *USAGE_LINE = "Usage: " PROGRAM_NAME " [-f logfile] [-o origin[,...]] [-O originfile] [-m minhits] [-binshv]";
 
@@ -2380,6 +2382,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[])
   // build the application information structure
   appVersionInfo.setup(PACKAGE_NAME, PROGRAM_NAME, PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, "");
 
+  runroot_handler(argv);
   // Before accessing file system initialize Layout engine
   Layout::create();
 
@@ -2442,7 +2445,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[])
         if (end > start) {
           char *buf;
 
-          buf = ats_strdup(line.substr(start, end).c_str());
+          buf = ats_stringdup(line.substr(start, end));
           if (buf) {
             origin_set->insert(buf);
           }
@@ -2470,7 +2473,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[])
   // Do the incremental parse of the default squid log.
   if (cl.incremental) {
     // Change directory to the log dir
-    if (chdir(Layout::get()->logdir) < 0) {
+    if (chdir(Layout::get()->logdir.c_str()) < 0) {
       exit_status.set(EXIT_CRITICAL, " can't chdir to ");
       exit_status.append(Layout::get()->logdir);
       my_exit(exit_status);
@@ -2573,7 +2576,7 @@ main(int /* argc ATS_UNUSED */, const char *argv[])
       last_state.st_ino = stat_buf.st_ino;
 
       // Find the old log file.
-      dirp = opendir(Layout::get()->logdir);
+      dirp = opendir(Layout::get()->logdir.c_str());
       if (nullptr == dirp) {
         exit_status.set(EXIT_WARNING, " can't read log directory");
       } else {
diff --git a/tests/bootstrap.py b/tests/bootstrap.py
index 251136b..26904b0 100755
--- a/tests/bootstrap.py
+++ b/tests/bootstrap.py
@@ -28,7 +28,8 @@ import sys
 pip_packages = [
     "autest",
     "hyper",
-    "requests"
+    "requests",
+    "dnslib"
 ]
 
 
diff --git a/tests/gold_tests/autest-site/microDNS.test.ext b/tests/gold_tests/autest-site/microDNS.test.ext
new file mode 100644
index 0000000..f02f643
--- /dev/null
+++ b/tests/gold_tests/autest-site/microDNS.test.ext
@@ -0,0 +1,77 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+from ports import get_port
+import json
+
+
+# AddRecord registers a list of IP addresses against a hostname
+def AddRecord(self,filename,hostname,list_ip_addr):
+
+    record = dict()
+    record[hostname] = list_ip_addr
+    return record
+
+
+# Adds the record in JSON format to the specified file
+def addRecordtoDNS(self,filename,hostname,list_ip_addr=[]):
+    jsondata=None
+    JFile = os.path.join(self.Variables.DataDir, filename)
+    rec = AddRecord(self,filename,hostname,list_ip_addr)
+    if not os.path.exists(os.path.dirname(JFile)):
+        os.makedirs(os.path.dirname(JFile))
+    if os.path.exists(JFile):
+        jf = open(JFile,'r')
+        jsondata = json.load(jf)
+
+    if jsondata == None:
+        jsondata = dict()
+        jsondata["mappings"]=list()
+        jsondata["mappings"].append(rec)
+        jsondata["otherwise"]=list()
+        jsondata["otherwise"].append("127.0.0.1")
+        jsondata["otherwise"].append("127.1.1.1")
+    else:
+        jsondata["mappings"].append(rec)
+    with open(JFile,'w+') as jf:
+        jf.write(json.dumps(jsondata))
+
+
+
+def MakeDNServer(obj, name,filename,IP='127.0.0.1',rr=False,options={}):
+    server_path= os.path.join(obj.Variables.AtsTestToolsDir,'microDNS/uDNS.py')
+    data_dir = os.path.join(obj.RunDirectory, name)
+    filepath = os.path.join(data_dir,filename)
+    # create Process
+    p = obj.Processes.Process(name)
+    port=get_port(p,"Port")
+    command = "python3 {3} {0} {1} {2}".format(IP, port,filepath,server_path)
+
+
+    # create process
+    p.Command = command
+    p.Setup.MakeDir(data_dir)
+    p.Variables.DataDir = data_dir
+    p.Ready = When.PortOpen(port)
+    AddMethodToInstance(p,AddRecord)
+    AddMethodToInstance(p,addRecordtoDNS)
+
+    return p
+
+AddTestRunSet(MakeDNServer,name="MakeDNServer")
+AddTestRunSet(MakeDNServer,name="MakeDNS")
diff --git a/tests/gold_tests/autest-site/ports.py b/tests/gold_tests/autest-site/ports.py
index 1f26e7e..b8cf8bf 100644
--- a/tests/gold_tests/autest-site/ports.py
+++ b/tests/gold_tests/autest-site/ports.py
@@ -59,12 +59,12 @@ def setup_port_queue(amount=1000):
         return
     try:
         # some docker setups don't have sbin setup correctly
-        new_env= os.environ.copy()
-        new_env['PATH']="/sbin:/usr/sbin:"+new_env['PATH']
+        new_env = os.environ.copy()
+        new_env['PATH'] = "/sbin:/usr/sbin:" + new_env['PATH']
         dmin, dmax = subprocess.check_output(
             ["sysctl", "net.ipv4.ip_local_port_range"],
             env=new_env
-            ).decode().split("=")[1].split()
+        ).decode().split("=")[1].split()
         dmin = int(dmin)
         dmax = int(dmax)
     except:
diff --git a/tests/gold_tests/body_factory/gold/http-get-200.gold b/tests/gold_tests/body_factory/gold/http-get-200.gold
index a5c3c38..c610ba3 100644
--- a/tests/gold_tests/body_factory/gold/http-get-200.gold
+++ b/tests/gold_tests/body_factory/gold/http-get-200.gold
@@ -1,6 +1,6 @@
 HTTP/1.1 200 OK
+``
 Content-Length: 47
-Age: 0
-Connection: keep-alive
+``
 
 This body should be returned for a GET request.
diff --git a/tests/gold_tests/body_factory/gold/http-get-304.gold b/tests/gold_tests/body_factory/gold/http-get-304.gold
index 03f3ced..d1ad0e1 100644
--- a/tests/gold_tests/body_factory/gold/http-get-304.gold
+++ b/tests/gold_tests/body_factory/gold/http-get-304.gold
@@ -1,5 +1,4 @@
 HTTP/1.1 304 Not Modified
-Age: 0
-Connection: keep-alive
+``
 Warning: 199 VERSION Proxy received unexpected 304 response; content may be stale
-
+``
diff --git a/tests/gold_tests/body_factory/gold/http-head-200.gold b/tests/gold_tests/body_factory/gold/http-head-200.gold
index 045c1e1..e40a733 100644
--- a/tests/gold_tests/body_factory/gold/http-head-200.gold
+++ b/tests/gold_tests/body_factory/gold/http-head-200.gold
@@ -1,4 +1,2 @@
 HTTP/1.1 200 OK
-Age: 0
-Connection: keep-alive
-
+``
diff --git a/tests/gold_tests/body_factory/http204_response.test.py b/tests/gold_tests/body_factory/http204_response.test.py
index 54d61dc..c9f9073 100644
--- a/tests/gold_tests/body_factory/http204_response.test.py
+++ b/tests/gold_tests/body_factory/http204_response.test.py
@@ -37,8 +37,8 @@ ts.Disk.records_config.update({
 })
 
 # Create a template body for a 204.
-body_factory_dir=ts.Variables.BODY_FACTORY_TEMPLATE_DIR
-ts.Disk.File(os.path.join(body_factory_dir, 'default', CUSTOM_TEMPLATE_204_HOST+'_default')).\
+body_factory_dir = ts.Variables.BODY_FACTORY_TEMPLATE_DIR
+ts.Disk.File(os.path.join(body_factory_dir, 'default', CUSTOM_TEMPLATE_204_HOST + '_default')).\
     WriteOn(
     """<HTML>
 <HEAD>
@@ -61,11 +61,11 @@ regex_remap_conf_file = "maps.reg"
 ts.Disk.remap_config.AddLine(
     'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host'
                     .format(DEFAULT_204_HOST, server.Variables.Port, regex_remap_conf_file)
-    )
+)
 ts.Disk.remap_config.AddLine(
     'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host @plugin=conf_remap.so @pparam=proxy.config.body_factory.template_base={0}'
                     .format(CUSTOM_TEMPLATE_204_HOST, server.Variables.Port, regex_remap_conf_file)
-    )
+)
 ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine(
     '//.*/ http://127.0.0.1:{0} @status=204'
     .format(server.Variables.Port)
diff --git a/tests/gold_tests/body_factory/http204_response_plugin.test.py b/tests/gold_tests/body_factory/http204_response_plugin.test.py
index a2ec580..7c8036b 100644
--- a/tests/gold_tests/body_factory/http204_response_plugin.test.py
+++ b/tests/gold_tests/body_factory/http204_response_plugin.test.py
@@ -34,9 +34,9 @@ regex_remap_conf_file = "maps.reg"
 
 ts.Disk.remap_config.AddLine(
     'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host'
-            .format(CUSTOM_PLUGIN_204_HOST, server.Variables.Port,
-              regex_remap_conf_file)
-    )
+    .format(CUSTOM_PLUGIN_204_HOST, server.Variables.Port,
+            regex_remap_conf_file)
+)
 ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine('//.*/ http://donotcare.test @status=204')
 
 Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'custom204plugin.cc'), ts)
diff --git a/tests/gold_tests/body_factory/http304_response.test.py b/tests/gold_tests/body_factory/http304_response.test.py
index ed672a6..84dadbc 100644
--- a/tests/gold_tests/body_factory/http304_response.test.py
+++ b/tests/gold_tests/body_factory/http304_response.test.py
@@ -37,7 +37,7 @@ regex_remap_conf_file = "maps.reg"
 ts.Disk.remap_config.AddLine(
     'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host'
                     .format(DEFAULT_304_HOST, server.Variables.Port, regex_remap_conf_file)
-    )
+)
 ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine(
     '//.*/ http://127.0.0.1:{0} @status=304'
     .format(server.Variables.Port)
diff --git a/tests/gold_tests/continuations/double.test.py b/tests/gold_tests/continuations/double.test.py
new file mode 100644
index 0000000..15b845d
--- /dev/null
+++ b/tests/gold_tests/continuations/double.test.py
@@ -0,0 +1,124 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+from random import randint
+Test.Summary = '''
+Test transactions and sessions, making sure they open and close in the proper order.
+'''
+# need Apache Benchmark. For RHEL7, this is httpd-tools
+Test.SkipUnless(
+    Condition.HasProgram("ab", "apache benchmark (httpd-tools) needs to be installed on system for this test to work")
+)
+Test.ContinueOnFail = True
+# Define default ATS
+ts = Test.MakeATSProcess("ts", command="traffic_manager")
+server = Test.MakeOriginServer("server")
+
+Test.testName = ""
+request_header = {"headers": "GET / HTTP/1.1\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+# expected response from the origin server
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+
+Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'continuations_verify.cc'), ts)
+
+# add response to the server dictionary
+server.addResponse("sessionfile.log", request_header, response_header)
+ts.Disk.records_config.update({
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'continuations_verify.*',
+    'proxy.config.http.cache.http': 0,  # disable cache to simplify the test.
+    'proxy.config.cache.enable_read_while_writer' : 0
+})
+ts.Disk.remap_config.AddLine(
+    'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)
+)
+
+numberOfRequests = randint(1000, 1500)
+
+# Make a *ton* of calls to the proxy!
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'ab -n {0} -c 10 http://127.0.0.1:{1}/;sleep 5'.format(numberOfRequests, ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+# time delay as proxy.config.http.wait_for_cache could be broken
+tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
+tr.Processes.Default.StartBefore(ts, ready=When.PortOpen(ts.Variables.port))
+tr.StillRunningAfter = ts
+
+comparator_command = '''
+if [ "`traffic_ctl metric get continuations_verify.{0}.close.1 | cut -d ' ' -f 2`" == "`traffic_ctl metric get continuations_verify.{0}.close.2 | cut -d ' ' -f 2`" ]; then\
+     echo yes;\
+    else \
+    echo no; \
+    fi;
+    '''
+
+records = ts.Disk.File(os.path.join(ts.Variables.RUNTIMEDIR, "records.snap"))
+
+
+def file_is_ready():
+    return os.path.exists(records.AbsPath)
+
+
+# number of sessions/transactions opened and closed are equal
+tr = Test.AddTestRun()
+tr.Processes.Process("filesleeper", "python -c 'from time import sleep; sleep(10)'")
+tr.Processes.Default.Command = comparator_command.format('ssn')
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.StartBefore(tr.Processes.filesleeper, ready=file_is_ready)
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
+tr.StillRunningAfter = ts
+# for debugging session number
+ssn1 = tr.Processes.Process("session1", 'traffic_ctl metric get continuations_verify.ssn.close.1 > ssn1')
+ssn2 = tr.Processes.Process("session2", 'traffic_ctl metric get continuations_verify.ssn.close.2 > ssn2')
+ssn1.Env = ts.Env
+ssn2.Env = ts.Env
+tr.Processes.Default.StartBefore(ssn1)
+tr.Processes.Default.StartBefore(ssn2)
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = comparator_command.format('txn')
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
+tr.StillRunningAfter = ts
+# for debugging transaction number
+txn1 = tr.Processes.Process("transaction1", 'traffic_ctl metric get continuations_verify.txn.close.1 > txn1')
+txn2 = tr.Processes.Process("transaction2", 'traffic_ctl metric get continuations_verify.txn.close.2 > txn2')
+txn1.Env = ts.Env
+txn2.Env = ts.Env
+tr.Processes.Default.StartBefore(txn1)
+tr.Processes.Default.StartBefore(txn2)
+
+# session count is positive,
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = "traffic_ctl metric get continuations_verify.ssn.close.1"
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression(" 0", 'should be nonzero')
+tr.StillRunningAfter = ts
+
+# and we receive the same number of transactions as we asked it to make
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = "traffic_ctl metric get continuations_verify.txn.close.1"
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
+    "continuations_verify.txn.close.1 {}".format(numberOfRequests), 'should be the number of transactions we made')
+tr.StillRunningAfter = ts
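The comparator_command above does the open/close comparison in shell; the same check written as a small Python sketch (illustrative only, assuming traffic_ctl is on PATH and running with the test's ATS environment):

    import subprocess

    def metric(name):
        # "traffic_ctl metric get NAME" prints "NAME VALUE"; keep the value column
        out = subprocess.check_output(['traffic_ctl', 'metric', 'get', name]).decode()
        return out.split()[-1]

    assert metric('continuations_verify.ssn.close.1') == metric('continuations_verify.ssn.close.2')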
diff --git a/tests/gold_tests/h2/http2.test.py b/tests/gold_tests/h2/http2.test.py
index ddae58b..c86a988 100644
--- a/tests/gold_tests/h2/http2.test.py
+++ b/tests/gold_tests/h2/http2.test.py
@@ -102,9 +102,10 @@ tr.Processes.Default.Streams.stdout = "gold/chunked.gold"
 tr.StillRunningAfter = server
 
 # Test Case 4: Multiple request
-client_path= os.path.join(Test.Variables.AtsTestToolsDir,'traffic-replay/')
+client_path = os.path.join(Test.Variables.AtsTestToolsDir, 'traffic-replay/')
 tr = Test.AddTestRun()
-tr.Processes.Default.Command = "python3 {0} -type {1} -log_dir {2} -port {3} -host '127.0.0.1' -s_port {4} -v -colorize False".format(client_path, 'h2', server.Variables.DataDir, ts.Variables.port,ts.Variables.ssl_port)
+tr.Processes.Default.Command = "python3 {0} -type {1} -log_dir {2} -port {3} -host '127.0.0.1' -s_port {4} -v -colorize False".format(
+    client_path, 'h2', server.Variables.DataDir, ts.Variables.port, ts.Variables.ssl_port)
 tr.Processes.Default.ReturnCode = 0
 tr.Processes.Default.Streams.stdout = "gold/replay.gold"
 tr.StillRunningAfter = server
diff --git a/tests/gold_tests/headers/data/www.http408.test.txt b/tests/gold_tests/headers/data/www.http408.test.txt
new file mode 100644
index 0000000..81f9f98
--- /dev/null
+++ b/tests/gold_tests/headers/data/www.http408.test.txt
@@ -0,0 +1,5 @@
+POST / HTTP/1.1
+Host: www.http408.test
+Content-Length: 100
+
+arbitrary content
\ No newline at end of file
diff --git a/tests/gold_tests/headers/data/www.redirect0.test_get.txt b/tests/gold_tests/headers/data/www.redirect0.test_get.txt
new file mode 100644
index 0000000..40fce98
--- /dev/null
+++ b/tests/gold_tests/headers/data/www.redirect0.test_get.txt
@@ -0,0 +1,2 @@
+GET http://www.redirect0.test/ HTTP/1.1
+
diff --git a/tests/gold_tests/headers/domain-blacklist-30x.test.py b/tests/gold_tests/headers/domain-blacklist-30x.test.py
index aef08d6..5d412d5 100644
--- a/tests/gold_tests/headers/domain-blacklist-30x.test.py
+++ b/tests/gold_tests/headers/domain-blacklist-30x.test.py
@@ -23,81 +23,92 @@ Test.Summary = '''
 Tests 30x responses are returned for matching domains
 '''
 
-Test.SkipUnless(Condition.HasProgram("grep","grep needs to be installed on system for this test to work"))
+Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work"))
 
-ts=Test.MakeATSProcess("ts")
-server=Test.MakeOriginServer("server")
+ts = Test.MakeATSProcess("ts")
+server = Test.MakeOriginServer("server")
 
-REDIRECT_301_HOST='www.redirect301.test'
-REDIRECT_302_HOST='www.redirect302.test'
-REDIRECT_307_HOST='www.redirect307.test'
-REDIRECT_308_HOST='www.redirect308.test'
-PASSTHRU_HOST='www.passthrough.test'
+REDIRECT_301_HOST = 'www.redirect301.test'
+REDIRECT_302_HOST = 'www.redirect302.test'
+REDIRECT_307_HOST = 'www.redirect307.test'
+REDIRECT_308_HOST = 'www.redirect308.test'
+REDIRECT_0_HOST = 'www.redirect0.test'
+PASSTHRU_HOST = 'www.passthrough.test'
 
 ts.Disk.records_config.update({
-        'proxy.config.diags.debug.enabled': 1,
-        'proxy.config.diags.debug.tags': 'header_rewrite|dbg_header_rewrite',
-        'proxy.config.body_factory.enable_logging': 1,
-        })
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'header_rewrite|dbg_header_rewrite',
+    'proxy.config.body_factory.enable_logging': 1,
+})
 
 ts.Disk.remap_config.AddLine("""\
 regex_map http://{0}/ http://{0}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_301.conf
 regex_map http://{1}/ http://{1}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_302.conf
 regex_map http://{2}/ http://{2}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_307.conf
 regex_map http://{3}/ http://{3}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_308.conf
-""".format(REDIRECT_301_HOST, REDIRECT_302_HOST, REDIRECT_307_HOST, REDIRECT_308_HOST)
+regex_map http://{4}/ http://{4}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_0.conf
+""".format(REDIRECT_301_HOST, REDIRECT_302_HOST, REDIRECT_307_HOST, REDIRECT_308_HOST, REDIRECT_0_HOST)
 )
 
-for x in (1,2,7,8):
-  ts.Disk.MakeConfigFile("header_rewrite_rules_30{0}.conf".format(x)).AddLine("""\
-set-redirect 30{0} "%<cque>"
+for x in (0, 301, 302, 307, 308):
+    ts.Disk.MakeConfigFile("header_rewrite_rules_{0}.conf".format(x)).AddLine("""\
+set-redirect {0} "%<cque>"
 """.format(x))
 
-Test.Setup.Copy(os.path.join(os.pardir,os.pardir,'tools','tcp_client.py'))
+Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py'))
 Test.Setup.Copy('data')
 
-redirect301tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_301_HOST))
+redirect301tr = Test.AddTestRun("Test domain {0}".format(REDIRECT_301_HOST))
 redirect301tr.Processes.Default.StartBefore(Test.Processes.ts)
 redirect301tr.StillRunningAfter = ts
-redirect301tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+redirect301tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
     format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_301_HOST))
-redirect301tr.Processes.Default.TimeOut=5 # seconds
-redirect301tr.Processes.Default.ReturnCode=0
-redirect301tr.Processes.Default.Streams.stdout="redirect301_get.gold"
+redirect301tr.Processes.Default.TimeOut = 5  # seconds
+redirect301tr.Processes.Default.ReturnCode = 0
+redirect301tr.Processes.Default.Streams.stdout = "redirect301_get.gold"
 
-redirect302tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_302_HOST))
+redirect302tr = Test.AddTestRun("Test domain {0}".format(REDIRECT_302_HOST))
 redirect302tr.StillRunningBefore = ts
 redirect302tr.StillRunningAfter = ts
-redirect302tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+redirect302tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
     format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_302_HOST))
-redirect302tr.Processes.Default.TimeOut=5 # seconds
-redirect302tr.Processes.Default.ReturnCode=0
-redirect302tr.Processes.Default.Streams.stdout="redirect302_get.gold"
+redirect302tr.Processes.Default.TimeOut = 5  # seconds
+redirect302tr.Processes.Default.ReturnCode = 0
+redirect302tr.Processes.Default.Streams.stdout = "redirect302_get.gold"
 
 
-redirect307tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_307_HOST))
+redirect307tr = Test.AddTestRun("Test domain {0}".format(REDIRECT_307_HOST))
 redirect302tr.StillRunningBefore = ts
 redirect307tr.StillRunningAfter = ts
-redirect307tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+redirect307tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
     format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_307_HOST))
-redirect307tr.Processes.Default.TimeOut=5 # seconds
-redirect307tr.Processes.Default.ReturnCode=0
-redirect307tr.Processes.Default.Streams.stdout="redirect307_get.gold"
+redirect307tr.Processes.Default.TimeOut = 5  # seconds
+redirect307tr.Processes.Default.ReturnCode = 0
+redirect307tr.Processes.Default.Streams.stdout = "redirect307_get.gold"
 
-redirect308tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_308_HOST))
+redirect308tr = Test.AddTestRun("Test domain {0}".format(REDIRECT_308_HOST))
 redirect308tr.StillRunningBefore = ts
 redirect308tr.StillRunningAfter = ts
-redirect308tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+redirect308tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
     format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_308_HOST))
-redirect308tr.Processes.Default.TimeOut=5 # seconds
-redirect308tr.Processes.Default.ReturnCode=0
-redirect308tr.Processes.Default.Streams.stdout="redirect308_get.gold"
+redirect308tr.Processes.Default.TimeOut = 5  # seconds
+redirect308tr.Processes.Default.ReturnCode = 0
+redirect308tr.Processes.Default.Streams.stdout = "redirect308_get.gold"
 
-passthroughtr=Test.AddTestRun("Test domain {0}".format(PASSTHRU_HOST))
+redirect0tr = Test.AddTestRun("Test domain {0}".format(REDIRECT_0_HOST))
+redirect0tr.StillRunningBefore = ts
+redirect0tr.StillRunningAfter = ts
+redirect0tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+    format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_0_HOST))
+redirect0tr.Processes.Default.TimeOut = 5  # seconds
+redirect0tr.Processes.Default.ReturnCode = 0
+redirect0tr.Processes.Default.Streams.stdout = "redirect0_get.gold"
+
+passthroughtr = Test.AddTestRun("Test domain {0}".format(PASSTHRU_HOST))
 passthroughtr.StillRunningBefore = ts
 passthroughtr.StillRunningAfter = ts
-passthroughtr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
+passthroughtr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\
     format(ts.Variables.port, 'data/{0}_get.txt'.format(PASSTHRU_HOST))
-passthroughtr.Processes.Default.TimeOut=5 # seconds
-passthroughtr.Processes.Default.ReturnCode=0
-passthroughtr.Processes.Default.Streams.stdout="passthrough_get.gold"
+passthroughtr.Processes.Default.TimeOut = 5  # seconds
+passthroughtr.Processes.Default.ReturnCode = 0
+passthroughtr.Processes.Default.Streams.stdout = "passthrough_get.gold"
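For each status in the loop above, MakeConfigFile writes a one-line header_rewrite rule; for x = 301, for example, header_rewrite_rules_301.conf contains just:

    set-redirect 301 "%<cque>"

The new x = 0 case feeds set-redirect a zero status; per redirect0_get.gold further down, ATS answers that request with a 302.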
diff --git a/tests/gold_tests/headers/forwarded-observer.py b/tests/gold_tests/headers/forwarded-observer.py
new file mode 100644
index 0000000..91b7baf
--- /dev/null
+++ b/tests/gold_tests/headers/forwarded-observer.py
@@ -0,0 +1,63 @@
+'''
+Extract the protocol information from the FORWARDED headers and store it in a log file for later verification.
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import re
+import subprocess
+
+log = open('forwarded.log', 'w')
+
+regexByEqualUuid = re.compile("^by=_[0-9a-f-]+$")
+
+byCount = 0
+byEqualUuid = "__INVALID__"
+
+def observe(headers):
+
+    global byCount
+    global byEqualUuid
+
+    seen = False
+    for h in headers.items():
+        if h[0].lower() == "forwarded":
+
+            content = h[1]
+
+            if content.startswith("by="):
+
+                byCount += 1
+
+                if ((byCount == 4) or (byCount == 5)) and regexByEqualUuid.match(content):  # "by" should give UUID
+
+                    # I don't think there is a way to know what UUID traffic_server generates, so I just do a crude format
+                    # check and make sure the same value is used consistently.
+
+                    byEqualUuid = content
+
+            content = content.replace(byEqualUuid, "__BY_EQUAL_UUID__", 1)
+
+            log.write(content + "\n")
+            seen = True
+
+    if not seen:
+        log.write("FORWARDED MISSING\n")
+    log.write("-\n")
+    log.flush()
+
+
+Hooks.register(Hooks.ReadRequestHook, observe)
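A small illustration of the substitution the observer performs so forwarded.gold stays stable across runs (the UUID value here is made up):

    byEqualUuid = "by=_0f6b2a1c-3d4e-5f60-7182-93a4b5c6d7e8"   # hypothetical value captured earlier
    content = "for=127.0.0.1;" + byEqualUuid + ";proto=http"
    print(content.replace(byEqualUuid, "__BY_EQUAL_UUID__", 1))
    # -> for=127.0.0.1;__BY_EQUAL_UUID__;proto=http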
diff --git a/tests/gold_tests/headers/forwarded.gold b/tests/gold_tests/headers/forwarded.gold
new file mode 100644
index 0000000..45451d6
--- /dev/null
+++ b/tests/gold_tests/headers/forwarded.gold
@@ -0,0 +1,41 @@
+FORWARDED MISSING
+-
+FORWARDED MISSING
+-
+for=127.0.0.1
+-
+by=127.0.0.1
+-
+by=unknown
+-
+by=Poxy_Proxy
+-
+__BY_EQUAL_UUID__
+-
+proto=http
+-
+host=www.forwarded-host.com
+-
+connection=http
+-
+connection=http/1.1
+-
+connection=http/1.1-tcp-ipv4
+-
+__BY_EQUAL_UUID__
+-
+for=127.0.0.1;by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by=127.0.0.1;proto=http;host=www.no-oride.com;connection=http;connection=http/1.1;connection=http/1.1-tcp-ipv4
+-
+for=127.0.0.1;by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by=127.0.0.1;proto=http;host=www.no-oride.com;connection=http;connection=http/1.0;connection=http/1.0-tcp-ipv4
+-
+for=0.6.6.6
+for=_argh, for=127.0.0.1;by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by=127.0.0.1;proto=http;host=www.no-oride.com;connection=http;connection=http/1.0;connection=http/1.0-tcp-ipv4
+-
+for=127.0.0.1;by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by=127.0.0.1;proto=https;host=www.no-oride.com;connection=https;connection=https/2;connection=http/1.1-h2-tls/1.2-tcp-ipv4
+-
+for=127.0.0.1;by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by=127.0.0.1;proto=https;host=www.no-oride.com;connection=https;connection=https/1.1;connection=http/1.1-tls/1.2-tcp-ipv4
+-
+for="[::1]";by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by="[::1]";proto=http;host=www.no-oride.com;connection=http;connection=http/1.1;connection=http/1.1-tcp-ipv6
+-
+for="[::1]";by=unknown;by=Poxy_Proxy;__BY_EQUAL_UUID__;by="[::1]";proto=https;host=www.no-oride.com;connection=https;connection=https/1.1;connection=http/1.1-tls/1.2-tcp-ipv6
+-
diff --git a/tests/gold_tests/headers/forwarded.test.py b/tests/gold_tests/headers/forwarded.test.py
new file mode 100644
index 0000000..e45a9d4
--- /dev/null
+++ b/tests/gold_tests/headers/forwarded.test.py
@@ -0,0 +1,289 @@
+'''
+Test the Forwarded header and related configuration.
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+import subprocess
+
+Test.Summary = '''
+Test FORWARDED header.
+'''
+
+Test.SkipUnless(
+    Condition.HasATSFeature('TS_USE_TLS_ALPN'),
+    Condition.HasCurlFeature('http2'),
+    Condition.HasCurlFeature('IPv6')
+)
+Test.ContinueOnFail = True
+
+testName = "FORWARDED"
+
+server = Test.MakeOriginServer("server", options={'--load': os.path.join(Test.TestDirectory, 'forwarded-observer.py')})
+
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.no-oride.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-none.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-for.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-ip.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-unknown.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-server-name.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-uuid.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-proto.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-host.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-compact.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-std.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {
+    "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-full.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+
+# Set up to check the output after the tests have run.
+#
+forwarded_log_id = Test.Disk.File("forwarded.log")
+forwarded_log_id.Content = "forwarded.gold"
+
+def baselineTsSetup(ts, sslPort):
+
+    ts.addSSLfile("../remap/ssl/server.pem")
+    ts.addSSLfile("../remap/ssl/server.key")
+
+    ts.Variables.ssl_port = sslPort
+
+    ts.Disk.records_config.update({
+        # 'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request.
+        'proxy.config.http.cache.http': 0, # Make sure each request is forwarded to the origin server.
+        'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.http.server_ports': (
+            'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
+                .format(ts.Variables.port, ts.Variables.ssl_port))
+    })
+
+    ts.Disk.ssl_multicert_config.AddLine(
+        'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
+    )
+
+    ts.Disk.remap_config.AddLine(
+        'map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)
+    )
+
+ts = Test.MakeATSProcess("ts", select_ports=False)
+
+baselineTsSetup(ts, 4443)
+
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-none.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=none'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-for.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=for'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-by-ip.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=ip'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-by-unknown.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=unknown'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-by-server-name.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=serverName'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-by-uuid.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=uuid'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-proto.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=proto'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-host.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=host'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-connection-compact.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=compact'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-connection-std.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=std'
+)
+ts.Disk.remap_config.AddLine(
+    'map http://www.forwarded-connection-full.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+    ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=full'
+)
+
+# Ask the OS if the port is ready for connect()
+#
+def CheckPort(Port):
+    return lambda: 0 == subprocess.call('netstat --listen --tcp -n | grep -q :{}'.format(Port), shell=True)
+
+# Basic HTTP 1.1 -- No Forwarded by default
+tr = Test.AddTestRun()
+# Wait for the micro server
+tr.Processes.Default.StartBefore(server, ready=CheckPort(server.Variables.Port))
+# Delay on readiness of our ssl ports
+tr.Processes.Default.StartBefore(Test.Processes.ts, ready=CheckPort(ts.Variables.ssl_port))
+#
+tr.Processes.Default.Command = (
+  'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+def TestHttp1_1(host):
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = (
+        'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://{}'.format(ts.Variables.port, host)
+    )
+    tr.Processes.Default.ReturnCode = 0
+
+# Basic HTTP 1.1 -- No Forwarded -- explicit configuration.
+#
+TestHttp1_1('www.forwarded-none.com')
+
+# Test enabling of each forwarded parameter singly.
+
+TestHttp1_1('www.forwarded-for.com')
+
+# Note: forwarded-observer.py counts on the "by" tests being done in the order below.
+
+TestHttp1_1('www.forwarded-by-ip.com')
+TestHttp1_1('www.forwarded-by-unknown.com')
+TestHttp1_1('www.forwarded-by-server-name.com')
+TestHttp1_1('www.forwarded-by-uuid.com')
+
+TestHttp1_1('www.forwarded-proto.com')
+TestHttp1_1('www.forwarded-host.com')
+TestHttp1_1('www.forwarded-connection-compact.com')
+TestHttp1_1('www.forwarded-connection-std.com')
+TestHttp1_1('www.forwarded-connection-full.com')
+
+ts2 = Test.MakeATSProcess("ts2", command="traffic_manager", select_ports=False)
+
+ts2.Variables.port += 1
+
+baselineTsSetup(ts2, 4444)
+
+ts2.Disk.records_config.update({
+    'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request.
+    'proxy.config.http.insert_forwarded': 'by=uuid'})
+
+ts2.Disk.remap_config.AddLine(
+    'map https://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Forwarded header with UUID of 2nd ATS.
+tr = Test.AddTestRun()
+# Delay on readiness of our ssl ports
+tr.Processes.Default.StartBefore(Test.Processes.ts2, ready=CheckPort(ts2.Variables.ssl_port))
+#
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# Call traffic_ctrl to set insert_forwarded
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'traffic_ctl --debug config set proxy.config.http.insert_forwarded' +
+    ' "for|by=ip|by=unknown|by=servername|by=uuid|proto|host|connection=compact|connection=std|connection=full"'
+)
+tr.Processes.Default.ForceUseShell = False
+tr.Processes.Default.Env = ts2.Env
+tr.Processes.Default.ReturnCode = 0
+
+# HTTP 1.1
+tr = Test.AddTestRun()
+# Delay to give traffic_ctl config change time to take effect.
+tr.DelayStart = 15
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# HTTP 1.0
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv4 --http1.0 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# HTTP 1.0 -- Forwarded headers already present
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    "curl --verbose -H 'forwarded:for=0.6.6.6' -H 'forwarded:for=_argh' --ipv4 --http1.0" +
+    " --proxy localhost:{} http://www.no-oride.com".format(ts2.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# HTTP 2
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv4 --http2 --insecure --header "Host: www.no-oride.com"' +
+    ' https://localhost:{}'.format(ts2.Variables.ssl_port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# TLS
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv4 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'
+        .format(ts2.Variables.ssl_port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+# IPv6
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv6 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)
+)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = (
+    'curl --verbose --ipv6 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'.format(ts2.Variables.ssl_port)
+)
+tr.Processes.Default.ReturnCode = 0
diff --git a/tests/gold_tests/headers/http408.test.py b/tests/gold_tests/headers/http408.test.py
index 9212023..b7b7137 100644
--- a/tests/gold_tests/headers/http408.test.py
+++ b/tests/gold_tests/headers/http408.test.py
@@ -24,35 +24,35 @@ Test.Summary = '''
 Check 408 response header for protocol stack data.
 '''
 
-Test.SkipUnless(
-)
 Test.ContinueOnFail = True
 
 # Define default ATS
 ts = Test.MakeATSProcess("ts")
 server = Test.MakeOriginServer("server")
 
-testName = "408 test"
+HTTP_408_HOST = 'www.http408.test'
 
-request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: {}\r\n\r\n".format(HTTP_408_HOST), "timestamp": "1469733493.993", "body": ""}
 response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
 server.addResponse("sessionlog.json", request_header, response_header)
 
 ts.Disk.remap_config.AddLine(
-    'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
-    )
+    'map http://{0} http://127.0.0.1:{1}'.format(HTTP_408_HOST, server.Variables.Port)
+)
 
+TIMEOUT = 2
 ts.Disk.records_config.update({
-    'proxy.config.http.transaction_no_activity_timeout_in' : 2,
-    })
+    'proxy.config.http.transaction_no_activity_timeout_in': TIMEOUT,
+})
 
-Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_408_client.py'))
+Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py'))
+Test.Setup.Copy('data')
 
 tr = Test.AddTestRun()
 tr.Processes.Default.StartBefore(server)
 tr.Processes.Default.StartBefore(Test.Processes.ts)
-tr.Processes.Default.Command = 'python tcp_408_client.py 127.0.0.1 {0} 4'.format(
-    ts.Variables.port)
+tr.Processes.Default.Command = 'python tcp_client.py 127.0.0.1 {0} {1} --delay-after-send {2}'\
+        .format(ts.Variables.port, 'data/{0}.txt'.format(HTTP_408_HOST), TIMEOUT + 2)
 tr.Processes.Default.ReturnCode = 0
 tr.Processes.Default.TimeOut = 10
 tr.Processes.Default.Streams.stdout = "http408.gold"
diff --git a/tests/gold_tests/headers/normalize_ae.gold b/tests/gold_tests/headers/normalize_ae.gold
new file mode 100644
index 0000000..7277b70
--- /dev/null
+++ b/tests/gold_tests/headers/normalize_ae.gold
@@ -0,0 +1,104 @@
+X-Au-Test: www.no-oride.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+X-Au-Test: www.ae-0.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+x-gzip
+-
+br
+-
+gzip, br
+-
+gzip;q=0.3, whatever;q=0.666, br;q=0.7
+-
+X-Au-Test: www.ae-1.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+X-Au-Test: www.ae-2.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+br
+-
+br
+-
+br
+-
+X-Au-Test: www.no-oride.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+x-gzip
+-
+br
+-
+gzip, br
+-
+gzip;q=0.3, whatever;q=0.666, br;q=0.7
+-
+X-Au-Test: www.ae-0.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+x-gzip
+-
+br
+-
+gzip, br
+-
+gzip;q=0.3, whatever;q=0.666, br;q=0.7
+-
+X-Au-Test: www.ae-1.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+X-Au-Test: www.ae-2.com
+ACCEPT-ENCODING MISSING
+-
+gzip
+-
+gzip
+-
+br
+-
+br
+-
+br
+-
diff --git a/tests/gold_tests/headers/normalize_ae.test.py b/tests/gold_tests/headers/normalize_ae.test.py
new file mode 100644
index 0000000..d5e122e
--- /dev/null
+++ b/tests/gold_tests/headers/normalize_ae.test.py
@@ -0,0 +1,145 @@
+'''
+Test normalizations of the Accept-Encoding header field.
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+import subprocess
+
+Test.Summary = '''
+Test normalizations of the Accept-Encoding header field.
+'''
+
+Test.ContinueOnFail = True
+
+server = Test.MakeOriginServer("server", options={'--load': os.path.join(Test.TestDirectory, 'normalize_ae_observer.py')})
+
+testName = "NORMALIZE_AE"
+
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.no-oride.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.ae-0.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.ae-1.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.ae-2.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionlog.json", request_header, response_header)
+
+# Define first ATS
+ts = Test.MakeATSProcess("ts", select_ports=False)
+
+def baselineTsSetup(ts):
+
+    ts.Disk.records_config.update({
+        # 'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.http.cache.http': 0, # Make sure each request is sent to the origin server.
+        'proxy.config.http.server_ports': 'ipv4:{}'.format(ts.Variables.port)
+    })
+
+    ts.Disk.remap_config.AddLine(
+        'map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)
+    )
+    ts.Disk.remap_config.AddLine(
+        'map http://www.ae-0.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+        ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=0'
+    )
+    ts.Disk.remap_config.AddLine(
+        'map http://www.ae-1.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+        ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1'
+    )
+    ts.Disk.remap_config.AddLine(
+        'map http://www.ae-2.com http://127.0.0.1:{0}'.format(server.Variables.Port) +
+        ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=2'
+    )
+
+baselineTsSetup(ts)
+
+# set up to check the output after the tests have run.
+#
+normalize_ae_log_id = Test.Disk.File("normalize_ae.log")
+normalize_ae_log_id.Content = "normalize_ae.gold"
+
+# ask the os if the port is ready for connect()
+#
+def CheckPort(port):
+    return lambda: 0 == subprocess.call('netstat --listen --tcp -n | grep -q :{}'.format(port), shell=True)
+
+# Try various Accept-Encoding header fields for a particular traffic server and host.
+def allAEHdrs(shouldWaitForUServer, shouldWaitForTs, ts, host):
+
+    tr = Test.AddTestRun()
+
+    if shouldWaitForUServer:
+        # wait for the micro server
+        tr.Processes.Default.StartBefore(server, ready=CheckPort(server.Variables.Port))
+
+    if shouldWaitForTs:
+        # wait for Traffic Server
+        # delay on readiness of port
+        tr.Processes.Default.StartBefore(ts, ready=CheckPort(ts.Variables.port))
+
+    baseCurl = 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} '.format(ts.Variables.port)
+
+    # No Accept-Encoding header.
+    #
+    tr.Processes.Default.Command = baseCurl + '--header "X-Au-Test: {0}" http://{0}'.format(host)
+    tr.Processes.Default.ReturnCode = 0
+
+    def curlTail(hdrValue):
+        return '--header "Accept-Encoding: {}" http://'.format(hdrValue) + host
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = baseCurl + curlTail('gzip')
+    tr.Processes.Default.ReturnCode = 0
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = baseCurl + curlTail('x-gzip')
+    tr.Processes.Default.ReturnCode = 0
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = baseCurl + curlTail('br')
+    tr.Processes.Default.ReturnCode = 0
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = baseCurl + curlTail('gzip, br')
+    tr.Processes.Default.ReturnCode = 0
+
+    tr = Test.AddTestRun()
+    tr.Processes.Default.Command = baseCurl + curlTail('gzip;q=0.3, whatever;q=0.666, br;q=0.7')
+    tr.Processes.Default.ReturnCode = 0
+
+def perTsTest(shouldWaitForUServer, ts):
+    allAEHdrs(shouldWaitForUServer, True, ts, 'www.no-oride.com')
+    allAEHdrs(False, False, ts, 'www.ae-0.com')
+    allAEHdrs(False, False, ts, 'www.ae-1.com')
+    allAEHdrs(False, False, ts, 'www.ae-2.com')
+
+perTsTest(True, ts)
+
+# Define second ATS
+ts2 = Test.MakeATSProcess("ts2", select_ports=False)
+
+ts2.Variables.port += 1
+
+baselineTsSetup(ts2)
+
+ts2.Disk.records_config.update({
+    'proxy.config.http.normalize_ae': 0,
+})
+
+perTsTest(False, ts2)
diff --git a/tests/tools/traffic-replay/Config.py b/tests/gold_tests/headers/normalize_ae_observer.py
similarity index 58%
copy from tests/tools/traffic-replay/Config.py
copy to tests/gold_tests/headers/normalize_ae_observer.py
index 1b97227..10de1d4 100644
--- a/tests/tools/traffic-replay/Config.py
+++ b/tests/gold_tests/headers/normalize_ae_observer.py
@@ -1,5 +1,5 @@
-#!/bin/env python3
 '''
+Extract the protocol information from the Accept-Encoding headers and store it in a log file for later verification.
 '''
 #  Licensed to the Apache Software Foundation (ASF) under one
 #  or more contributor license agreements.  See the NOTICE file
@@ -17,18 +17,23 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-# SSL config
-ca_certs = None
-keyfile = None
+log = open('normalize_ae.log', 'w')
 
-# Proxy config
-proxy_host = "127.0.0.1"
-proxy_ssl_port = 443
-proxy_nonssl_port = 8080
+def observe(headers):
 
-# process and thread config
-nProcess = 4
-nThread = 4
+    seen = False
+    for h in headers.items():
+        if h[0] == "X-Au-Test":
+            log.write("X-Au-Test: {}\n".format(h[1]))
 
-#colorize output
-colorize = True
+        if h[0] == "Accept-Encoding":
+            log.write("{}\n".format(h[1]))
+            seen = True
+
+    if not seen:
+        log.write("ACCEPT-ENCODING MISSING\n")
+
+    log.write("-\n")
+    log.flush()
+
+Hooks.register(Hooks.ReadRequestHook, observe)
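A hedged sketch of the normalization the gold file reflects, written for the header values this test sends (an approximation only, not the actual ATS implementation, which parses q-values properly):

    def normalize_ae(value, mode):
        # mode mirrors proxy.config.http.normalize_ae; returning None means the header is dropped
        has_gzip = 'gzip' in value            # matches both "gzip" and "x-gzip" here
        has_br = 'br' in value
        if mode == 1:
            return 'gzip' if has_gzip else None
        if mode == 2:
            if has_br:
                return 'br'
            return 'gzip' if has_gzip else None
        return value                          # mode 0: leave Accept-Encoding untouched

Applied to the Accept-Encoding values the test sends, this reproduces the www.ae-0/ae-1/ae-2 blocks in normalize_ae.gold.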
diff --git a/tests/gold_tests/headers/redirect0_get.gold b/tests/gold_tests/headers/redirect0_get.gold
new file mode 100644
index 0000000..b9e828d
--- /dev/null
+++ b/tests/gold_tests/headers/redirect0_get.gold
@@ -0,0 +1,22 @@
+HTTP/1.1 302 Redirect
+Connection: keep-alive
+Cache-Control: no-store
+Location: http://www.redirect0.test/
+Content-Type: text/html
+Content-Language: en
+Content-Length: 308
+
+<HTML>
+<HEAD>
+<TITLE>Document Has Moved</TITLE>
+</HEAD>
+
+<BODY BGCOLOR="white" FGCOLOR="black">
+<H1>Document Has Moved</H1>
+<HR>
+
+<FONT FACE="Helvetica,Arial"><B>
+Description: The document you requested has moved to a new location.  The new location is "http://www.redirect0.test/".
+</B></FONT>
+<HR>
+</BODY>
diff --git a/tests/gold_tests/logging/ccid_ctid.test.py b/tests/gold_tests/logging/ccid_ctid.test.py
new file mode 100644
index 0000000..ef60083
--- /dev/null
+++ b/tests/gold_tests/logging/ccid_ctid.test.py
@@ -0,0 +1,107 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+import subprocess
+
+Test.Summary = '''
+Test new ccid and ctid log fields
+'''
+# need Curl
+Test.SkipUnless(
+    Condition.HasProgram(
+        "curl", "Curl need to be installed on system for this test to work"),
+    # Condition.IsPlatform("linux"), Don't see the need for this.
+    Condition.HasATSFeature('TS_USE_TLS_ALPN'),
+    Condition.HasCurlFeature('http2')
+)
+
+# Define default ATS.  "select_ports=False" needed because SSL port used.
+#
+ts = Test.MakeATSProcess("ts", select_ports=False)
+
+ts.addSSLfile("../remap/ssl/server.pem")
+ts.addSSLfile("../remap/ssl/server.key")
+
+ts.Variables.ssl_port = 4443
+ts.Disk.records_config.update({
+    # 'proxy.config.diags.debug.enabled': '1',
+    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    'proxy.config.http.server_ports': 'ipv4:{0} ipv4:{1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port)
+})
+
+ts.Disk.remap_config.AddLine(
+    'map http://127.0.0.1:{0} http://httpbin.org/ip'.format(ts.Variables.port)
+)
+
+ts.Disk.remap_config.AddLine(
+    'map https://127.0.0.1:{0} https://httpbin.org/ip'.format(ts.Variables.ssl_port)
+)
+
+ts.Disk.ssl_multicert_config.AddLine(
+    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
+)
+
+ts.Disk.logging_config.AddLines(
+    '''custom = format {
+  Format = "%<ccid> %<ctid>"
+}
+
+log.ascii {
+  Format = custom,
+  Filename = 'test_ccid_ctid'
+}'''.split("\n")
+)
+
+# Ask the OS if the port is ready for connect()
+#
+def CheckPort(Port):
+    return lambda: 0 == subprocess.call('netstat --listen --tcp -n | grep -q :{}'.format(Port), shell=True)
+
+tr = Test.AddTestRun()
+# Delay on readiness of ssl port
+tr.Processes.Default.StartBefore(Test.Processes.ts, ready=CheckPort(ts.Variables.ssl_port))
+#
+tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" "http://127.0.0.1:{0}" --http1.1 --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl "https://127.0.0.1:{0}" "https://127.0.0.1:{0}" --http2 --insecure --verbose'.format(
+    ts.Variables.ssl_port)
+tr.Processes.Default.ReturnCode = 0
+
+# Delay to allow TS to flush report to disk, then validate generated log.
+#
+tr = Test.AddTestRun()
+tr.DelayStart = 10
+tr.Processes.Default.Command = 'python {0} < {1}'.format(
+    os.path.join(Test.TestDirectory, 'ccid_ctid_observer.py'),
+    os.path.join(ts.Variables.LOGDIR, 'test_ccid_ctid.log'))
+tr.Processes.Default.ReturnCode = 0
diff --git a/tests/tools/traffic-replay/Config.py b/tests/gold_tests/logging/ccid_ctid_observer.py
similarity index 52%
copy from tests/tools/traffic-replay/Config.py
copy to tests/gold_tests/logging/ccid_ctid_observer.py
index 1b97227..1b4cee5 100644
--- a/tests/tools/traffic-replay/Config.py
+++ b/tests/gold_tests/logging/ccid_ctid_observer.py
@@ -1,5 +1,5 @@
-#!/bin/env python3
 '''
+Examines log generated by ccid_ctid.test.py, returns 0 if valid, 1 if not.
 '''
 #  Licensed to the Apache Software Foundation (ASF) under one
 #  or more contributor license agreements.  See the NOTICE file
@@ -17,18 +17,37 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-# SSL config
-ca_certs = None
-keyfile = None
+import sys
+import csv
 
-# Proxy config
-proxy_host = "127.0.0.1"
-proxy_ssl_port = 443
-proxy_nonssl_port = 8080
+ccid = []
+ctid = []
 
-# process and thread config
-nProcess = 4
-nThread = 4
+# Read in ccid and ctid fields from each line of the generated report.
+#
+for ln in csv.reader(sys.stdin, delimiter=' '):
+    if len(ln) != 2:
+        exit(code=1)
+    i = int(ln[0])
+    if i < 0:
+        exit(code=1)
+    ccid.append(i)
+    i = int(ln[1])
+    if i < 0:
+        exit(code=1)
+    ctid.append(i)
+
+# Validate contents of report.
+#
+if (ccid[0] != ccid[1] and
+    ccid[1] != ccid[2] and
+    ccid[2] == ccid[3] and
+    ctid[2] != ctid[3] and
+    ccid[3] != ccid[4] and
+    ccid[4] == ccid[5] and
+    ctid[4] != ctid[5]):
+    exit(code=0)
 
-#colorize output
-colorize = True
+# Failure exit if report was not valid.
+#
+exit(code=1)
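An example of a log the checker accepts (the numbers are hypothetical; only the relationships between them matter):

    10 1    # curl #1: its own connection
    11 1    # curl #2: a new connection
    12 1    # curl #3 sends two requests over one HTTP/1.1 connection ...
    12 2    # ... same ccid, distinct ctid
    13 1    # curl #4 does the same over HTTP/2
    13 2

That is, requests sharing a connection must share a ccid while carrying distinct ctid values, and separate curl invocations must land on distinct connections.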
diff --git a/tests/gold_tests/logging/gold/field-test.gold b/tests/gold_tests/logging/gold/field-test.gold
new file mode 100644
index 0000000..f75ee82
--- /dev/null
+++ b/tests/gold_tests/logging/gold/field-test.gold
@@ -0,0 +1,3 @@
+application/json,%20application/json
+application/jason,%20application/json
+application/json
diff --git a/tests/gold_tests/logging/log-field.test.py b/tests/gold_tests/logging/log-field.test.py
new file mode 100644
index 0000000..66cd7c7
--- /dev/null
+++ b/tests/gold_tests/logging/log-field.test.py
@@ -0,0 +1,102 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+
+Test.Summary = '''
+Test log fields.
+'''
+# need Curl
+Test.SkipUnless(
+    Condition.HasProgram(
+        "curl", "Curl need to be installed on system for this test to work"),
+    Condition.IsPlatform("linux")
+)
+
+# Define default ATS
+ts = Test.MakeATSProcess("ts")
+# Microserver
+server = Test.MakeOriginServer("server")
+
+request_header = {'timestamp': 100, "headers": "GET /test-1 HTTP/1.1\r\nHost: test-1\r\n\r\n", "body": ""}
+response_header = {'timestamp': 100,
+                   "headers": "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 1"}
+server.addResponse("sessionlog.json", request_header, response_header)
+server.addResponse("sessionlog.json",
+                   {'timestamp': 101, "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", "body": ""},
+                   {'timestamp': 101, "headers": "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 2"}
+                   )
+server.addResponse("sessionlog.json",
+                   {'timestamp': 102, "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", "body": ""},
+                   {'timestamp': 102, "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", "body": "Test 3"}
+                   )
+
+ts.Disk.records_config.update({
+    'proxy.config.net.connections_throttle': 100,
+    'proxy.config.http.cache.http': 0
+})
+# setup some config file for this server
+ts.Disk.remap_config.AddLine(
+    'map / http://localhost:{}/'.format(server.Variables.Port)
+)
+
+ts.Disk.logging_config.AddLines(
+    '''
+custom = format {
+  Format = '%<{Content-Type}essh>'
+}
+
+log.ascii {
+  Format = custom,
+  Filename = 'field-test'
+}
+'''.split("\n")
+)
+
+# #########################################################################
+# at the end of the different test run a custom log file should exist
+# Because of this we expect the testruns to pass the real test is if the
+# customlog file exists and passes the format check
+Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'field-test.log'),
+               exists=True, content='gold/field-test.gold')
+
+# first test is a miss for default
+tr = Test.AddTestRun()
+# Wait for the micro server
+tr.Processes.Default.StartBefore(server)
+# Delay on readiness of our ssl ports
+tr.Processes.Default.StartBefore(Test.Processes.ts)
+
+tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" http://localhost:{0}/test-1' .format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" http://localhost:{0}/test-2' .format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" http://localhost:{0}/test-3' .format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+
+tr = Test.AddTestRun()
+tr.DelayStart = 10
+tr.Processes.Default.Command = 'echo "Delay for log flush"'
+tr.Processes.Default.ReturnCode = 0
diff --git a/tests/gold_tests/null_transform/gold/null_transform-200.gold b/tests/gold_tests/null_transform/gold/null_transform-200.gold
new file mode 100644
index 0000000..fcc57b3
--- /dev/null
+++ b/tests/gold_tests/null_transform/gold/null_transform-200.gold
@@ -0,0 +1,14 @@
+``
+> GET http://www.example.com/ HTTP/1.1
+> Host: www.example.com``
+> User-Agent: curl/``
+> Accept: */*
+> Proxy-Connection:``
+``
+< HTTP/1.1 200 OK
+< Date:``
+< Content-Length: 26
+< Age: ``
+< Proxy-Connection: keep-alive
+< Server: ATS/``
+``
diff --git a/tests/gold_tests/null_transform/gold/null_transform-tag.gold b/tests/gold_tests/null_transform/gold/null_transform-tag.gold
new file mode 100644
index 0000000..7f60846
--- /dev/null
+++ b/tests/gold_tests/null_transform/gold/null_transform-tag.gold
@@ -0,0 +1 @@
+``DIAG: (null_transform)``
\ No newline at end of file
diff --git a/tests/gold_tests/null_transform/null_transform.test.py b/tests/gold_tests/null_transform/null_transform.test.py
new file mode 100644
index 0000000..846f630
--- /dev/null
+++ b/tests/gold_tests/null_transform/null_transform.test.py
@@ -0,0 +1,84 @@
+
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+import os
+Test.Summary = '''
+Test a basic null transform plugin
+'''
+
+# Need Curl
+Test.SkipUnless(
+    Condition.HasProgram("curl", "curl needs to be installed on system for this test to work")
+)
+Test.ContinueOnFail = True
+
+# Define default ATS
+ts = Test.MakeATSProcess("ts")
+server = Test.MakeOriginServer("server")
+
+Test.testName = ""
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
+                  "timestamp": "1469733493.993",
+                  "body": ""
+                  }
+# Expected response from origin server
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
+                   "timestamp": "1469733493.993",
+
+                   "body": "This is expected response."}
+
+# Add response the server dictionary
+server.addResponse("sessionfile.log", request_header, response_header)
+ts.Disk.records_config.update({
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'null_transform'
+})
+ts.Disk.remap_config.AddLine(
+    'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Load plugin
+plugin_args = ""
+lib_dir = os.path.join(Test.Variables.AtsTestToolsDir, '../../lib')
+plugin_dir = ts.Env['PROXY_CONFIG_PLUGIN_PLUGIN_DIR']
+plugin_dir_src = os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'null_transform.c')
+ts.Setup.Copy(plugin_dir_src, plugin_dir)
+
+in_basename = os.path.basename(plugin_dir_src)
+in_path = os.path.join(plugin_dir, in_basename)
+out_basename = os.path.splitext(in_basename)[0] + '.so'
+out_path = os.path.join(plugin_dir, out_basename)
+
+ts.Setup.RunCommand('tsxs -c {0} -o {1} -I {2}'.format(in_path, out_path, lib_dir))
+ts.Disk.plugin_config.AddLine("{0} {1}".format(out_basename, plugin_args))
+
+
+# www.example.com Host
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" --verbose -H "Proxy-Connection: keep-alive"'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.StartBefore(Test.Processes.ts)
+tr.Processes.Default.Streams.stderr = "gold/null_transform-200.gold"
+tr.StillRunningAfter = server
+
+# Check Plugin Loading Information
+ts.Streams.All = "gold/null_transform-tag.gold"
diff --git a/tests/gold_tests/redirect/gold/redirect.gold b/tests/gold_tests/redirect/gold/redirect.gold
new file mode 100644
index 0000000..3738278
--- /dev/null
+++ b/tests/gold_tests/redirect/gold/redirect.gold
@@ -0,0 +1,5 @@
+HTTP/1.1 204 No Content
+Date: ``
+Age: ``
+Connection: keep-alive
+Server: ATS/``
diff --git a/tests/gold_tests/redirect/redirect.test.py b/tests/gold_tests/redirect/redirect.test.py
new file mode 100644
index 0000000..ffb686d
--- /dev/null
+++ b/tests/gold_tests/redirect/redirect.test.py
@@ -0,0 +1,63 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+Test.Summary = '''
+Test basic redirection
+'''
+
+# TODO figure out how to use this
+MAX_REDIRECT = 99
+
+Test.SkipUnless(
+    Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")
+)
+
+Test.ContinueOnFail = True
+
+ts = Test.MakeATSProcess("ts")
+redirect_serv = Test.MakeOriginServer("re_server")
+dest_serv = Test.MakeOriginServer("dest_server")
+
+ts.Disk.records_config.update({
+    'proxy.config.http.redirection_enabled': 1,
+    'proxy.config.http.number_of_redirections': MAX_REDIRECT,
+    'proxy.config.http.cache.http': 0  # ,
+    # 'proxy.config.diags.debug.enabled': 1
+})
+
+redirect_request_header = {"headers": "GET /redirect HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""}
+redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirectDest\r\n\r\n".format(
+    dest_serv.Variables.Port), "timestamp": "5678", "body": ""}
+dest_request_header = {"headers": "GET /redirectDest HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "11", "body": ""}
+dest_response_header = {"headers": "HTTP/1.1 204 No Content\r\n\r\n", "timestamp": "22", "body": ""}
+
+redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header)
+dest_serv.addResponse("sessionfile.log", dest_request_header, dest_response_header)
+
+ts.Disk.remap_config.AddLine(
+    'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, redirect_serv.Variables.Port)
+)
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl -i http://127.0.0.1:{0}/redirect'.format(ts.Variables.port)
+tr.Processes.Default.StartBefore(ts)
+tr.Processes.Default.StartBefore(redirect_serv)
+tr.Processes.Default.StartBefore(dest_serv)
+tr.Processes.Default.Streams.stdout = "gold/redirect.gold"
+tr.Processes.Default.ReturnCode = 0
diff --git a/tests/gold_tests/remap/gold/remap-DNS-200.gold b/tests/gold_tests/remap/gold/remap-DNS-200.gold
new file mode 100644
index 0000000..1727d4e
--- /dev/null
+++ b/tests/gold_tests/remap/gold/remap-DNS-200.gold
@@ -0,0 +1,14 @@
+``
+> GET http://testDNS.com``
+> Host: testDNS.com``
+> User-Agent: curl/``
+> Accept: */*
+``
+< HTTP/1.1 200 OK
+< Date: ``
+< Age: ``
+< Transfer-Encoding: chunked
+< Proxy-Connection: keep-alive
+< Server: ATS/``
+< 
+``
diff --git a/tests/gold_tests/remap/gold/remap-redirect.gold b/tests/gold_tests/remap/gold/remap-redirect.gold
new file mode 100644
index 0000000..51fdfc4
--- /dev/null
+++ b/tests/gold_tests/remap/gold/remap-redirect.gold
@@ -0,0 +1,15 @@
+``
+> GET http://test3.com``
+> Host: test3.com``
+> User-Agent: curl/``
+> Accept: */*
+``
+< HTTP/1.1 301 Redirect
+< Date: ``
+< Proxy-Connection: ``
+< Server: ATS/``
+< Cache-Control: ``
+< Location: http://httpbin.org/
+``
+< Content-Length: ``
+``
diff --git a/tests/gold_tests/remap/gold/remap-referer-hit.gold b/tests/gold_tests/remap/gold/remap-referer-hit.gold
new file mode 100644
index 0000000..1263973
--- /dev/null
+++ b/tests/gold_tests/remap/gold/remap-referer-hit.gold
@@ -0,0 +1,14 @@
+``
+> GET http://test4.com``
+> Host: test4.com``
+> User-Agent: curl/``
+> Accept: */*
+``
+< HTTP/1.1 200 OK
+< Date: ``
+< Age: ``
+< Transfer-Encoding: chunked
+< Proxy-Connection: keep-alive
+< Server: ATS/``
+< 
+``
diff --git a/tests/gold_tests/remap/gold/remap-referer-miss.gold b/tests/gold_tests/remap/gold/remap-referer-miss.gold
new file mode 100644
index 0000000..cef630e
--- /dev/null
+++ b/tests/gold_tests/remap/gold/remap-referer-miss.gold
@@ -0,0 +1,15 @@
+``
+> GET http://test4.com``
+> Host: test4.com``
+> User-Agent: curl/``
+> Accept: */*
+``
+< HTTP/1.1 302 Redirect
+< Date: ``
+< Proxy-Connection: ``
+< Server: ATS/``
+< Cache-Control: ``
+< Location: http://httpbin.org
+``
+< Content-Length: ``
+``
diff --git a/tests/gold_tests/remap/remap_http.test.py b/tests/gold_tests/remap/remap_http.test.py
index 9221e26..2543df2 100644
--- a/tests/gold_tests/remap/remap_http.test.py
+++ b/tests/gold_tests/remap/remap_http.test.py
@@ -28,6 +28,7 @@ Test.ContinueOnFail = True
 # Define default ATS
 ts = Test.MakeATSProcess("ts")
 server = Test.MakeOriginServer("server")
+dns = Test.MakeDNServer("dns", filename="dns_file.json")
 
 Test.testName = ""
 request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
@@ -38,7 +39,10 @@ response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "t
 server.addResponse("sessionfile.log", request_header, response_header)
 ts.Disk.records_config.update({
     'proxy.config.diags.debug.enabled': 1,
-    'proxy.config.diags.debug.tags': 'url.*',
+    'proxy.config.diags.debug.tags': 'http.*|dns',
+    'proxy.config.http.referer_filter': 1,
+    'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
+    'proxy.config.dns.resolv_conf': 'NULL'
 })
 
 ts.Disk.remap_config.AddLine(
@@ -47,13 +51,25 @@ ts.Disk.remap_config.AddLine(
 ts.Disk.remap_config.AddLine(
     'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port)
 )
+ts.Disk.remap_config.AddLine(
+    'redirect http://test3.com http://httpbin.org'.format(server.Variables.Port)
+)
+ts.Disk.remap_config.AddLine(
+    'map_with_referer http://test4.com http://127.0.0.1:{0} http://httpbin.org (.*[.])?persia[.]com'.format(server.Variables.Port)
+)
+ts.Disk.remap_config.AddLine(
+    'map http://testDNS.com http://audrey.hepburn.com:{0}'.format(server.Variables.Port)
+)
 
+# dns.addRecordtoDNS(filename="dns_file.json",hostname="wonderwoman",list_ip_addr=["127.0.0.1","127.0.1.1"])
+dns.addRecordtoDNS(filename="dns_file.json", hostname="audrey.hepburn.com", list_ip_addr=["127.0.0.1", "127.0.1.1"])
 # call localhost straight
 tr = Test.AddTestRun()
 tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/" --verbose'.format(ts.Variables.port)
 tr.Processes.Default.ReturnCode = 0
 # time delay as proxy.config.http.wait_for_cache could be broken
 tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.StartBefore(dns)
 tr.Processes.Default.StartBefore(Test.Processes.ts)
 tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold"
 tr.StillRunningAfter = server
@@ -86,9 +102,36 @@ tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://www.test.com
 tr.Processes.Default.ReturnCode = 0
 tr.Processes.Default.Streams.stderr = "gold/remap-404.gold"
 
-# bad port
+# redirect result
 tr = Test.AddTestRun()
-tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://www.example.com:1234/"  -H "Proxy-Connection: keep-alive" --verbose'.format(
+tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://test3.com" --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Streams.stderr = "gold/remap-redirect.gold"
+
+# referer hit
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: persia.com" --verbose'.format(
     ts.Variables.port)
 tr.Processes.Default.ReturnCode = 0
-tr.Processes.Default.Streams.stderr = "gold/remap-404.gold"
+tr.Processes.Default.Streams.stderr = "gold/remap-referer-hit.gold"
+
+# referer miss
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: monkey.com" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Streams.stderr = "gold/remap-referer-miss.gold"
+
+# referer hit
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://test4.com" --header "Referer: www.persia.com" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Streams.stderr = "gold/remap-referer-hit.gold"
+
+# DNS test
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl  --proxy 127.0.0.1:{0} "http://testDNS.com" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Streams.stderr = "gold/remap-DNS-200.gold"
diff --git a/tests/gold_tests/tls_hooks/tls_hooks12.test.py b/tests/gold_tests/tls_hooks/tls_hooks12.test.py
index 571538b..aff3bc3 100644
--- a/tests/gold_tests/tls_hooks/tls_hooks12.test.py
+++ b/tests/gold_tests/tls_hooks/tls_hooks12.test.py
@@ -74,13 +74,13 @@ ts.Streams.stderr = "gold/ts-preaccept-delayed-1-immdate-2.gold"
 # sequence may appear on that probe.  Or it may not.  If we move away from the probe connection
 # we can check for the right number of each message.
 #preacceptstring0 = "Pre accept delay callback 0"
-#ts.Streams.All = Testers.ContainsExpression(
+# ts.Streams.All = Testers.ContainsExpression(
 #    "\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring0), "Pre accept message appears only once or twice", reflags=re.S | re.M)
 #preacceptstring1 = "Pre accept callback 0"
-#ts.Streams.All = Testers.ContainsExpression(
+# ts.Streams.All = Testers.ContainsExpression(
 #    "\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring1), "Pre accept message appears only once or twice", reflags=re.S | re.M)
 #preacceptstring2 = "Pre accept callback 1"
-#ts.Streams.All = Testers.ContainsExpression(
+# ts.Streams.All = Testers.ContainsExpression(
 #    "\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring2), "Pre accept message appears only once or twice", reflags=re.S | re.M)
 
 tr.Processes.Default.TimeOut = 5
diff --git a/tests/gold_tests/transaction/txn.test.py b/tests/gold_tests/transaction/txn.test.py
new file mode 100644
index 0000000..f2d1d7b
--- /dev/null
+++ b/tests/gold_tests/transaction/txn.test.py
@@ -0,0 +1,121 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import os
+from random import randint
+Test.Summary = '''
+Test transactions and sessions, making sure they open and close in the proper order.
+'''
+# need Apache Benchmark. For RHEL7, this is httpd-tools
+Test.SkipUnless(
+    Condition.HasProgram("ab", "apache benchmark (httpd-tools) needs to be installed on system for this test to work")
+)
+Test.ContinueOnFail = True
+# Define default ATS
+ts = Test.MakeATSProcess("ts", command="traffic_manager")
+server = Test.MakeOriginServer("server")
+
+Test.testName = ""
+request_header = {"headers": "GET / HTTP/1.1\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+# expected response from the origin server
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+
+Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssntxnorder_verify.cc'), ts)
+
+# add response to the server dictionary
+server.addResponse("sessionfile.log", request_header, response_header)
+ts.Disk.records_config.update({
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'ssntxnorder_verify.*',
+    'proxy.config.http.cache.http': 0,  # disable cache to simplify the test
+    'proxy.config.cache.enable_read_while_writer': 0
+})
+
+ts.Disk.remap_config.AddLine(
+    'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)
+)
+
+numberOfRequests = randint(1000, 1500)
+
+# Make a *ton* of calls to the proxy!
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'ab -n {0} -c 10 http://127.0.0.1:{1}/;sleep 5'.format(numberOfRequests, ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+# time delay as proxy.config.http.wait_for_cache could be broken
+tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
+tr.Processes.Default.StartBefore(ts, ready=When.PortOpen(ts.Variables.port))
+tr.StillRunningAfter = ts
+
+# Watch the records snapshot file.
+records = ts.Disk.File(os.path.join(ts.Variables.RUNTIMEDIR, "records.snap"))
+
+
+def file_is_ready():
+    return os.path.exists(records.AbsPath)
+
+
+# Check our work on traffic_ctl
+# no errors happened,
+tr = Test.AddTestRun()
+tr.Processes.Process("filesleeper", "python -c 'from time import sleep; sleep(10)'")
+tr.Processes.Default.Command = 'traffic_ctl metric get ssntxnorder_verify.err'
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.StartBefore(tr.Processes.filesleeper, ready=file_is_ready)
+tr.Processes.Default.Streams.All = Testers.ContainsExpression(
+    'ssntxnorder_verify.err 0', 'incorrect statistic returned, or a possible error occurred')
+tr.StillRunningAfter = ts
+
+comparator_command = '''
+if [ "`traffic_ctl metric get ssntxnorder_verify.{0}.start | cut -d ' ' -f 2`" == "`traffic_ctl metric get ssntxnorder_verify.{0}.close | cut -d ' ' -f 2`" ]; then\
+     echo yes;\
+    else \
+    echo no; \
+    fi;
+    '''
+
+# number of sessions/transactions opened and closed are equal
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = comparator_command.format('ssn')
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
+tr.StillRunningAfter = ts
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = comparator_command.format('txn')
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
+tr.StillRunningAfter = ts
+
+# session count is positive,
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = "traffic_ctl metric get ssntxnorder_verify.ssn.start"
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression(" 0", 'should be nonzero')
+tr.StillRunningAfter = ts
+
+# and we receive the same number of transactions as we asked it to make
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = "traffic_ctl metric get ssntxnorder_verify.txn.start"
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Env = ts.Env
+tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
+    "ssntxnorder_verify.txn.start {}".format(numberOfRequests), 'should be the number of transactions we made')
+tr.StillRunningAfter = ts
diff --git a/tests/tools/microDNS/README.md b/tests/tools/microDNS/README.md
new file mode 100644
index 0000000..d6b533e
--- /dev/null
+++ b/tests/tools/microDNS/README.md
@@ -0,0 +1,46 @@
+uDNS
+=====
+
+uDNS is a small DNS server that takes a pre-defined set of domain names and the IPs each domain name maps to. The mappings are supplied in a JSON file, in the format described below.
+
+uDNS listens on the address and port given in the command line arguments and serves both UDP and TCP connections.
+
+If uDNS does not find the requested domain in the explicitly mapped section of the JSON, uDNS will respond with the IPs given in the `otherwise` section of the JSON. The `otherwise` section is mandatory.
+
+
+JSON format
+------
+```json
+{
+  "mappings": [
+    {"domain1": ["ip1", "ip2", "etc"]},
+    {"domain2": ["ip3", "ip4", "etc"]},
+    {"domain3": ["ip5"]},
+  ],
+
+  "otherwise": ["defaultip1", "defaultip2", "etc"]
+}
+```
+
+An example can be found in `sample_zonefile.json`
+
+
+Caveat
+------
+You should not include two records where one domain is a subdomain of the other, for example `host1.example.com` and `example.com`.
+
+A DNS request for `host1.example.com` could return the A record associated with either `host1.example.com` or `example.com`, depending on which mapping is matched first.
+
+
+Running
+------
+`python3 uDNS.py ip_addr port zone_file [--rr]`
+
+For a detailed description of flags, see `python3 uDNS.py -h`
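+
+For example, to serve the bundled `sample_zonefile.json` on localhost with
+round-robin rotation enabled (the port below is arbitrary; any free port works):
+
+```bash
+python3 uDNS.py 127.0.0.1 5300 sample_zonefile.json --rr
+```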
+
+
+Use with Apache Traffic Server
+------
+1. In `records.config`, add configuration lines: `CONFIG proxy.config.dns.nameservers STRING ip_address:PORT` and `CONFIG proxy.config.dns.round_robin_nameservers INT 0`, where `PORT` is whatever port you want uDNS to serve on.
+2. Run uDNS on `ip_address:PORT`
+3. All domains mapped in the uDNS JSON config file should now be resolvable through ATS as well.
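+
+As a minimal sketch, assuming uDNS is serving on 127.0.0.1 port 5300 (both
+values are examples), the relevant `records.config` lines would look like:
+
+```
+CONFIG proxy.config.dns.nameservers STRING 127.0.0.1:5300
+CONFIG proxy.config.dns.round_robin_nameservers INT 0
+```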
diff --git a/tests/tools/microDNS/sample_zonefile.json b/tests/tools/microDNS/sample_zonefile.json
new file mode 100644
index 0000000..e4c8282
--- /dev/null
+++ b/tests/tools/microDNS/sample_zonefile.json
@@ -0,0 +1,9 @@
+{ 
+  "mappings": [
+    {"abc.xyz.com.": ["127.0.0.1","127.0.1.1"]},
+    {"yahoo.com.": ["128.0.0.1", "128.0.1.0"]},
+    {"yelp.com.": ["34.35.166.23", "129.0.0.1"]}
+  ],
+
+  "otherwise": ["127.0.0.1", "127.1.1.1"]
+}
diff --git a/tests/tools/microDNS/uDNS.py b/tests/tools/microDNS/uDNS.py
new file mode 100644
index 0000000..0748304
--- /dev/null
+++ b/tests/tools/microDNS/uDNS.py
@@ -0,0 +1,181 @@
+# coding=utf-8
+
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import datetime
+import sys
+import time
+import threading
+import traceback
+import socketserver
+import argparse
+import codecs
+import json
+from dnslib import *
+
+TTL = 60 * 5  # completely arbitrary TTL value
+round_robin = False
+default_records = list()
+records = dict()
+
+
+class DomainName(str):
+    def __getattr__(self, item):
+        return DomainName(item + '.' + self)
+
+
+class BaseRequestHandler(socketserver.BaseRequestHandler):
+
+    def get_data(self):
+        raise NotImplementedError
+
+    def send_data(self, data):
+        raise NotImplementedError
+
+    def handle(self):
+        now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
+        print("\n\n%s request %s (%s %s):" % (self.__class__.__name__[:3], now, self.client_address[0],
+                                              self.client_address[1]))
+        try:
+            data = self.get_data()
+            self.send_data(dns_response(data))
+        except Exception:
+            traceback.print_exc(file=sys.stderr)
+
+
+class TCPRequestHandler(BaseRequestHandler):
+
+    def get_data(self):
+        data = self.request.recv(8192).strip()
+        sz = int(codecs.encode(data[:2], 'hex'), 16)
+        if sz < len(data) - 2:
+            raise Exception("Wrong size of TCP packet")
+        elif sz > len(data) - 2:
+            raise Exception("Too big TCP packet")
+        return data[2:]
+
+    def send_data(self, data):
+        sz = codecs.decode(hex(len(data))[2:].zfill(4), 'hex')
+        return self.request.sendall(sz + data)
+
+
+class UDPRequestHandler(BaseRequestHandler):
+
+    def get_data(self):
+        return self.request[0].strip()
+
+    def send_data(self, data):
+        return self.request[1].sendto(data, self.client_address)
+
+
+def build_domain_mappings(path):
+    with open(path) as f:
+        zone_file = json.load(f)
+
+    for domain in zone_file['mappings']:
+        for d in iter(domain.keys()):
+            # this loop only runs once, kind of a hack to access the only key in the dict
+            domain_name = DomainName(d)
+            print("Domain name:", domain_name)
+            records[domain_name] = [A(x) for x in domain[domain_name]]
+            print(records[domain_name])
+
+    default_records.extend([A(d) for d in zone_file['otherwise']])
+
+
+def add_authoritative_records(reply, domain):
+    # ns1 and ns2 are hardcoded in; change if necessary
+    reply.add_auth(RR(rname=domain, rtype=QTYPE.NS, rclass=1, ttl=TTL, rdata=NS(domain.ns1)))
+    reply.add_auth(RR(rname=domain, rtype=QTYPE.NS, rclass=1, ttl=TTL, rdata=NS(domain.ns2)))
+
+
+def dns_response(data):
+    ''' dns_response takes the raw bytes from the socket and decides which
+        RRs are returned in the response '''
+    global default_records, records, TTL, round_robin
+
+    request = DNSRecord.parse(data)
+    print(request)
+
+    reply = DNSRecord(DNSHeader(id=request.header.id, qr=1, aa=1, ra=1), q=request.q)
+    qname = request.q.qname
+    qn = str(qname)
+    qtype = request.q.qtype
+    qt = QTYPE[qtype]
+    found_specific = False
+
+    # first look for a specific mapping
+    for domain, rrs in records.items():
+        if domain == qn or qn.endswith('.' + domain):
+            # we are the authoritative name server for this domain and all subdomains
+            for rdata in rrs:
+                # only include requested record types (ie. A, MX, etc)
+                rqt = rdata.__class__.__name__
+                if qt in ['*', rqt]:
+                    found_specific = True
+                    reply.add_answer(RR(rname=qname, rtype=getattr(QTYPE, str(rqt)), rclass=1, ttl=TTL, rdata=rdata))
+
+            # rotate the A entries if round robin is on
+            if round_robin:
+                a_records = [x for x in rrs if type(x) == A]
+                records[domain] = a_records[1:] + a_records[:1]  # rotate list
+            break
+
+    # else if a specific mapping is not found, return default A-records
+    if not found_specific:
+        for a in default_records:
+            reply.add_answer(RR(rname=qname, rtype=QTYPE.A, rclass=1, ttl=TTL, rdata=a))
+
+        if round_robin:
+            default_records = default_records[1:] + default_records[:1]
+    print("---- Reply: ----\n", reply)
+    return reply.pack()
+
+
+if __name__ == '__main__':
+    # handle cmd line args
+    parser = argparse.ArgumentParser()
+    parser.add_argument("ip_addr", type=str, help="Interface", default="127.0.0.1")
+    parser.add_argument("port", type=int, help="port uDNS should listen on")
+    parser.add_argument("zone_file", help="path to zone file")
+    parser.add_argument("--rr", action='store_true',
+                        help='round robin load balances if multiple IP addresses are present for 1 domain')
+    args = parser.parse_args()
+
+    if args.rr:
+        round_robin = True
+    build_domain_mappings(args.zone_file)
+
+    servers = [
+        socketserver.ThreadingUDPServer((args.ip_addr, args.port), UDPRequestHandler),
+        socketserver.ThreadingTCPServer((args.ip_addr, args.port), TCPRequestHandler),
+    ]
+
+    print("Starting DNS...")
+    for s in servers:
+        thread = threading.Thread(target=s.serve_forever)  # that thread will start one more thread for each request
+        thread.daemon = True  # exit the server thread when the main thread terminates
+        thread.start()
+
+    try:
+        while 1:
+            time.sleep(1)
+            sys.stderr.flush()
+            sys.stdout.flush()
+
+    except KeyboardInterrupt:
+        pass
+    finally:
+        for s in servers:
+            s.shutdown()
diff --git a/tests/tools/plugins/continuations_verify.cc b/tests/tools/plugins/continuations_verify.cc
new file mode 100644
index 0000000..a40d2b2
--- /dev/null
+++ b/tests/tools/plugins/continuations_verify.cc
@@ -0,0 +1,178 @@
+/**
+  @file
+  @brief Plugin to verify that session and transaction close hooks are
+  delivered to every registered continuation. Keeps statistics on the number
+  of close hooks caught by each continuation.
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#define __STDC_FORMAT_MACROS 1 // for inttypes.h
+#include <inttypes.h>          // for PRIu64
+#include <iostream>
+#include <map>
+#include <set>
+#include <sstream>
+#include <stdlib.h> // for abort
+#include <string.h> // for NULL macro
+#include <ts/ts.h>  // for debug
+
+// TODO Is LIFECYCLE_MSG enabled in 6.2.0, or 7.0.0, might require push
+// with version rework
+
+// debug messages viewable by setting 'proxy.config.diags.debug.tags'
+// in 'records.config'
+
+// debug messages during one-time initialization
+static const char DEBUG_TAG_INIT[] = "continuations_verify.init";
+
+// plugin registration info
+static char plugin_name[]   = "continuations_verify";
+static char vendor_name[]   = "Yahoo! Inc.";
+static char support_email[] = "ats-devel@yahoo-inc.com";
+
+static TSMutex order_mutex_1; // lock on global data
+static TSMutex order_mutex_2; // lock on global data
+
+// Statistics provided by the plugin
+static int stat_ssn_close_1 = 0; // number of TS_HTTP_SSN_CLOSE hooks caught by the first continuation
+static int stat_ssn_close_2 = 0; // number of TS_HTTP_SSN_CLOSE hooks caught by the second continuation
+static int stat_txn_close_1 = 0; // number of TS_HTTP_TXN_CLOSE hooks caught by the first continuation
+static int stat_txn_close_2 = 0; // number of TS_HTTP_TXN_CLOSE hooks caught by the second continuation
+
+/**
+    This handler is attached to the session and transaction close hooks and
+    increments the corresponding statistics. It is registered during plugin
+    initialization. The return value is ignored.
+*/
+static int
+handle_order_1(TSCont contp, TSEvent event, void *edata)
+{
+  TSHttpSsn ssnp; // session data
+  TSHttpTxn txnp; // transaction data
+
+  // Find the event that happened
+  switch (event) {
+  case TS_EVENT_HTTP_TXN_CLOSE: // End of transaction
+    txnp = reinterpret_cast<TSHttpTxn>(edata);
+
+    TSStatIntIncrement(stat_txn_close_1, 1);
+    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  case TS_EVENT_HTTP_SSN_CLOSE: // End of session
+    ssnp = reinterpret_cast<TSHttpSsn>(edata);
+
+    TSStatIntIncrement(stat_ssn_close_1, 1);
+    TSHttpSsnReenable(ssnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  // Just release the lock for all other states and do nothing
+  default:
+    break;
+  }
+
+  return 0;
+}
+
+static int
+handle_order_2(TSCont contp, TSEvent event, void *edata)
+{
+  TSHttpSsn ssnp; // session data
+  TSHttpTxn txnp; // transaction data
+
+  // Find the event that happened
+  switch (event) {
+  case TS_EVENT_HTTP_TXN_CLOSE: // End of transaction
+    txnp = reinterpret_cast<TSHttpTxn>(edata);
+
+    TSStatIntIncrement(stat_txn_close_2, 1);
+    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  case TS_EVENT_HTTP_SSN_CLOSE: // End of session
+    ssnp = reinterpret_cast<TSHttpSsn>(edata);
+
+    TSStatIntIncrement(stat_ssn_close_2, 1);
+    TSHttpSsnReenable(ssnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  // Just release the lock for all other states and do nothing
+  default:
+    break;
+  }
+
+  return 0;
+}
+
+/**
+    Entry point for the plugin.
+        - Attaches global hooks for session start and close.
+        - Attaches global hooks for transaction start and close.
+        - Attaches lifecycle hook for communication through traffic_ctl
+        - Initializes all statistics as described in the README
+*/
+void
+TSPluginInit(int argc, const char *argv[])
+{
+  TSDebug(DEBUG_TAG_INIT, "initializing plugin");
+
+  TSPluginRegistrationInfo info;
+
+  info.plugin_name   = plugin_name;
+  info.vendor_name   = vendor_name;
+  info.support_email = support_email;
+
+#if (TS_VERSION_MAJOR < 3)
+  if (TSPluginRegister(TS_SDK_VERSION_2_0, &info) != TS_SUCCESS) {
+#elif (TS_VERSION_MAJOR < 6)
+  if (TSPluginRegister(TS_SDK_VERSION_3_0, &info) != TS_SUCCESS) {
+#else
+  if (TSPluginRegister(&info) != TS_SUCCESS) {
+#endif
+    TSError("[%s] Plugin registration failed. \n", plugin_name);
+  }
+
+  order_mutex_1 = TSMutexCreate();
+  TSCont contp_1;
+  order_mutex_2 = TSMutexCreate();
+  TSCont contp_2;
+
+  contp_1 = TSContCreate(handle_order_1, order_mutex_1);
+  contp_2 = TSContCreate(handle_order_2, order_mutex_2);
+  if (contp_1 == NULL || contp_2 == NULL) {
+    // Continuation initialization failed. Unrecoverable, report and exit.
+    TSError("[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    // Continuation initialization succeeded.
+
+    stat_txn_close_1 =
+      TSStatCreate("continuations_verify.txn.close.1", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_ssn_close_1 =
+      TSStatCreate("continuations_verify.ssn.close.1", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_txn_close_2 =
+      TSStatCreate("continuations_verify.txn.close.2", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_ssn_close_2 =
+      TSStatCreate("continuations_verify.ssn.close.2", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+
+    // Add all hooks.
+    TSHttpHookAdd(TS_HTTP_TXN_CLOSE_HOOK, contp_1);
+    TSHttpHookAdd(TS_HTTP_SSN_CLOSE_HOOK, contp_1);
+
+    TSHttpHookAdd(TS_HTTP_TXN_CLOSE_HOOK, contp_2);
+    TSHttpHookAdd(TS_HTTP_SSN_CLOSE_HOOK, contp_2);
+  }
+}
diff --git a/example/null_transform/null_transform.c b/tests/tools/plugins/null_transform.c
similarity index 100%
copy from example/null_transform/null_transform.c
copy to tests/tools/plugins/null_transform.c
diff --git a/tests/tools/plugins/ssl_hook_test.cc b/tests/tools/plugins/ssl_hook_test.cc
index 5cdb004..e8ae055 100644
--- a/tests/tools/plugins/ssl_hook_test.cc
+++ b/tests/tools/plugins/ssl_hook_test.cc
@@ -38,9 +38,9 @@ int
 ReenableSSL(TSCont cont, TSEvent event, void *edata)
 {
   TSVConn ssl_vc = reinterpret_cast<TSVConn>(TSContDataGet(cont));
+  TSDebug(PN, "Callback reenable ssl_vc=%p", ssl_vc);
   TSVConnReenable(ssl_vc);
   TSContDestroy(cont);
-  TSDebug(PN, "Callback reenable ssl_vc=%p", ssl_vc);
   return TS_SUCCESS;
 }
 
diff --git a/tests/tools/plugins/ssntxnorder_verify.cc b/tests/tools/plugins/ssntxnorder_verify.cc
new file mode 100644
index 0000000..e2889d8
--- /dev/null
+++ b/tests/tools/plugins/ssntxnorder_verify.cc
@@ -0,0 +1,328 @@
+/**
+  @file
+  @brief Plugin to verify the ordering of session and transaction start and
+  close hooks is correct. Keeps track of statistics about the number of
+  hooks tracked that are caught and of the number of errors encountered.
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+#define __STDC_FORMAT_MACROS 1 // for inttypes.h
+#include <inttypes.h>          // for PRIu64
+#include <iostream>
+#include <map>
+#include <set>
+#include <sstream>
+#include <stdlib.h> // for abort
+#include <string.h> // for NULL macro
+#include <ts/ts.h>  // for debug
+
+// TODO Is LIFECYCLE_MSG enabled in 6.2.0, or 7.0.0, might require push
+// with version rework
+
+// debug messages viewable by setting 'proxy.config.diags.debug.tags'
+// in 'records.config'
+
+// debug messages during one-time initialization
+static const char DEBUG_TAG_INIT[] = "ssntxnorder_verify.init";
+
+// debug messages on every request serviced
+static const char DEBUG_TAG_HOOK[] = "ssntxnorder_verify.hook";
+
+// plugin registration info
+static char plugin_name[]   = "ssntxnorder_verify";
+static char vendor_name[]   = "Yahoo! Inc.";
+static char support_email[] = "ats-devel@yahoo-inc.com";
+
+static TSMutex order_mutex; // lock on global data
+
+// List of started sessions, SSN_START seen, SSN_CLOSE not seen yet.
+static std::set<TSHttpSsn> started_ssns;
+static int ssn_balance = 0; // +1 on SSN_START, -1 on SSN_CLOSE
+
+// Metadata for active transactions. Stored upon start to persist improper
+// closing behavior.
+typedef struct started_txn {
+  uint64_t id;
+  TSHttpTxn txnp;
+  TSHttpSsn ssnp;                      // enclosing session
+  started_txn(uint64_t id) : id(id) {} // used for lookup on id
+  started_txn(uint64_t id, TSHttpTxn txnp, TSHttpSsn ssnp) : id(id), txnp(txnp), ssnp(ssnp) {}
+} started_txn;
+
+// Comparator functor for transactions. Compare by ID.
+struct txn_compare {
+  bool
+  operator()(const started_txn &lhs, const started_txn &rhs) const
+  {
+    return lhs.id < rhs.id;
+  }
+};
+// List of started transactions, TXN_START seen, TXN_CLOSE not seen yet.
+static std::set<started_txn, txn_compare> started_txns;
+static int txn_balance = 0; // +1 on TXN_START -1 on TXN_CLOSE
+
+// Statistics provided by the plugin
+static int stat_ssn_close = 0; // number of TS_HTTP_SSN_CLOSE hooks caught
+static int stat_ssn_start = 0; // number of TS_HTTP_SSN_START hooks caught
+static int stat_txn_close = 0; // number of TS_HTTP_TXN_CLOSE hooks caught
+static int stat_txn_start = 0; // number of TS_HTTP_TXN_START hooks caught
+static int stat_err       = 0; // number of inaccuracies encountered
+
+// IPC information
+static char *ctl_tag         = plugin_name; // name is a convenient identifier
+static const char ctl_dump[] = "dump";      // output active ssn/txn tables cmd
+
+/**
+    This function is invoked upon TS_EVENT_LIFECYCLE_MSG. It outputs the
+    active SSN and TXN tables (the items that have not yet been closed).
+    Information displayed for transactions:
+        - TXN ID
+        - Enclosing SSN ID
+    Information displayed for sessions:
+        - SSN ID
+*/
+static void
+dump_tables(void)
+{
+  TSDebug(DEBUG_TAG_HOOK, "Dumping active session and transaction tables.");
+  std::stringstream dump("");
+
+  dump << std::string(100, '+') << std::endl;
+
+  if (started_ssns.empty()) {
+    dump << "No active sessions could be found." << std::endl;
+  } else {
+    // Output for every active session
+    for (std::set<TSHttpSsn>::iterator it = started_ssns.begin(); it != started_ssns.end(); ++it) {
+      dump << "Session --> ID: " << *it << std::endl;
+    }
+  }
+
+  if (started_txns.empty()) {
+    dump << "No active transactions could be found." << std::endl;
+  } else {
+    // Output for every active transaction
+    for (std::set<started_txn, txn_compare>::iterator it = started_txns.begin(); it != started_txns.end(); ++it) {
+      dump << "Transaction --> ID: " << it->id << " ; Enclosing SSN ID: " << it->ssnp << " ;" << std::endl;
+    }
+  }
+  dump << std::string(100, '+') << std::endl;
+  std::cout << dump.str() << std::endl;
+}
+
+/**
+    This handler is attached to the session and transaction start and close
+    hooks and records those events. It is registered during plugin
+    initialization. The return value is ignored.
+*/
+static int
+handle_order(TSCont contp, TSEvent event, void *edata)
+{
+  TSHttpSsn ssnp;    // session data
+  TSHttpTxn txnp;    // transaction data
+  TSPluginMsg *msgp; // message data
+
+  // Find the event that happened
+  switch (event) {
+  case TS_EVENT_HTTP_SSN_CLOSE: // End of session
+  {
+    ssnp = reinterpret_cast<TSHttpSsn>(edata);
+    TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_SSN_CLOSE [ SSNID = %p ]", ssnp);
+    TSStatIntIncrement(stat_ssn_close, 1);
+    if (started_ssns.erase(ssnp) == 0) {
+      // No record existed for this session
+      TSError("Session [ SSNID = %p ] closing was not previously started", ssnp);
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    if (--ssn_balance < 0) {
+      TSError("More sessions have been closed than started.");
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    TSHttpSsnReenable(ssnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  }
+
+  case TS_EVENT_HTTP_SSN_START: // Beginning of session
+  {
+    ssnp = reinterpret_cast<TSHttpSsn>(edata);
+    TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_SSN_START [ SSNID = %p ]", ssnp);
+    TSStatIntIncrement(stat_ssn_start, 1);
+
+    if (!started_ssns.insert(ssnp).second) {
+      // Insert failed. Session already existed in the record.
+      TSError("Session [ SSNID = %p ] has previously started.", ssnp);
+      TSStatIntIncrement(stat_err, 1);
+    }
+    ++ssn_balance;
+
+    TSHttpSsnReenable(ssnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  }
+
+  case TS_EVENT_HTTP_TXN_CLOSE: // End of transaction
+  {
+    txnp = reinterpret_cast<TSHttpTxn>(edata);
+    TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_TXN_CLOSE [ TXNID = %" PRIu64 " ]", TSHttpTxnIdGet(txnp));
+    TSStatIntIncrement(stat_txn_close, 1);
+
+    std::set<started_txn>::iterator current_txn = started_txns.find(started_txn(TSHttpTxnIdGet(txnp)));
+
+    if (current_txn != started_txns.end()) {
+      // Transaction exists.
+
+      ssnp = current_txn->ssnp;
+      if (started_ssns.find(ssnp) == started_ssns.end()) {
+        // The session of the transaction was either not started, or was
+        // already closed.
+        TSError("Transaction [ TXNID = %" PRIu64 " ] closing not in an "
+                "active session [ SSNID = %p ].",
+                current_txn->id, ssnp);
+        TSStatIntIncrement(stat_err, 1);
+      }
+      started_txns.erase(current_txn); // Stop monitoring the transaction
+    } else {
+      // Transaction does not exist; current_txn is end() here, so take the id from the event data.
+      TSError("Transaction [ TXNID = %" PRIu64 " ] closing not "
+              "previously started.",
+              TSHttpTxnIdGet(txnp));
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    if (--txn_balance < 0) {
+      TSError("More transactions have been closed than started.");
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  }
+
+  case TS_EVENT_HTTP_TXN_START: // Beginning of transaction
+  {
+    txnp = reinterpret_cast<TSHttpTxn>(edata);
+    ssnp = TSHttpTxnSsnGet(txnp);
+    TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_TXN_START [ TXNID = %" PRIu64 " ]", TSHttpTxnIdGet(txnp));
+    TSStatIntIncrement(stat_txn_start, 1);
+
+    started_txn new_txn = started_txn(TSHttpTxnIdGet(txnp), txnp, ssnp);
+
+    if (started_ssns.find(ssnp) == started_ssns.end()) {
+      // Session of the transaction has not started.
+      TSError("Transaction [ TXNID = %" PRIu64 " ] starting not in an "
+              "active session [ SSNID = %p ].",
+              new_txn.id, ssnp);
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    if (!started_txns.insert(new_txn).second) {
+      // Insertion failed. Transaction has previously started.
+      TSError("Transaction [ TXNID = %" PRIu64 " ] has previously started.", new_txn.id);
+      TSStatIntIncrement(stat_err, 1);
+    }
+
+    ++txn_balance;
+
+    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
+    break;
+  }
+
+#if ((TS_VERSION_MAJOR == 6 && TS_VERSION_MINOR >= 2) || TS_VERSION_MAJOR > 6)
+  case TS_EVENT_LIFECYCLE_MSG: // External trigger, such as traffic_ctl
+  {
+    TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_LIFECYCLE_MSG");
+    msgp = reinterpret_cast<TSPluginMsg *>(edata); // inconsistency
+
+    // Verify message is with the appropriate tag
+    if (!strcmp(ctl_tag, msgp->tag) && strncmp(ctl_dump, reinterpret_cast<const char *>(msgp->data), strlen(ctl_dump)) == 0) {
+      dump_tables();
+    }
+
+    break;
+  }
+#endif
+
+  // Just release the lock for all other states and do nothing
+  default:
+    break;
+  }
+
+  return 0;
+}
+
+/**
+    Entry point for the plugin.
+        - Attaches global hooks for session start and close.
+        - Attaches global hooks for transaction start and close.
+        - Attaches lifecycle hook for communication through traffic_ctl
+        - Initializes all statistics as described in the README
+*/
+void
+TSPluginInit(int argc, const char *argv[])
+{
+  TSDebug(DEBUG_TAG_INIT, "initializing plugin");
+
+  TSPluginRegistrationInfo info;
+
+  info.plugin_name   = plugin_name;
+  info.vendor_name   = vendor_name;
+  info.support_email = support_email;
+
+#if (TS_VERSION_MAJOR < 3)
+  if (TSPluginRegister(TS_SDK_VERSION_2_0, &info) != TS_SUCCESS) {
+#elif (TS_VERSION_MAJOR < 6)
+  if (TSPluginRegister(TS_SDK_VERSION_3_0, &info) != TS_SUCCESS) {
+#else
+  if (TSPluginRegister(&info) != TS_SUCCESS) {
+#endif
+    TSError("[%s] Plugin registration failed. \n", plugin_name);
+  }
+
+  order_mutex = TSMutexCreate();
+  TSCont contp;
+
+  contp = TSContCreate(handle_order, order_mutex);
+  if (contp == NULL) {
+    // Continuation initialization failed. Unrecoverable, report and exit.
+    TSError("[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    // Continuation initialization succeeded.
+
+    stat_ssn_start = TSStatCreate("ssntxnorder_verify.ssn.start", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_ssn_close = TSStatCreate("ssntxnorder_verify.ssn.close", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_txn_start = TSStatCreate("ssntxnorder_verify.txn.start", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_txn_close = TSStatCreate("ssntxnorder_verify.txn.close", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+    stat_err       = TSStatCreate("ssntxnorder_verify.err", TS_RECORDDATATYPE_INT, TS_STAT_NON_PERSISTENT, TS_STAT_SYNC_SUM);
+
+    // Add all hooks.
+    TSHttpHookAdd(TS_HTTP_SSN_START_HOOK, contp);
+    TSHttpHookAdd(TS_HTTP_SSN_CLOSE_HOOK, contp);
+
+    TSHttpHookAdd(TS_HTTP_TXN_START_HOOK, contp);
+    TSHttpHookAdd(TS_HTTP_TXN_CLOSE_HOOK, contp);
+
+#if ((TS_VERSION_MAJOR == 6 && TS_VERSION_MINOR >= 2) || TS_VERSION_MAJOR > 6)
+    TSLifecycleHookAdd(TS_LIFECYCLE_MSG_HOOK, contp);
+#endif
+  }
+}
diff --git a/tests/tools/tcp_408_client.py b/tests/tools/tcp_408_client.py
deleted file mode 100644
index eb1d721..0000000
--- a/tests/tools/tcp_408_client.py
+++ /dev/null
@@ -1,63 +0,0 @@
-'''
-A simple command line interface to send/receive bytes over TCP.
-'''
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-
-import argparse
-import socket
-import sys
-import time
-
-def tcp_client(host, port, sleep, header, data):
-    pass
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect((host, port))
-    s.sendall(header.encode())
-    s.sendall(data[0].encode())
-    time.sleep(sleep)
-    s.shutdown(socket.SHUT_WR)
-    while True:
-        output = s.recv(4096)  # suggested bufsize from docs.python.org
-        if len(output) <= 0:
-            break
-        else:
-            sys.stdout.write(output.decode())
-    s.close()
-
-
-DESCRIPTION =\
-    """A simple command line interface to send/receive bytes over TCP.
-
-The full contents of the given file are sent via a TCP connection to the given
-host and port. Then data is read from the connection and printed to standard
-output. Streaming is not supported."""
-
-
-def main(argv):
-    parser = argparse.ArgumentParser(description=DESCRIPTION)
-    parser.add_argument('host', help='the target host')
-    parser.add_argument('port', type=int, help='the target port')
-    parser.add_argument('sleep', type=int, help='timeout')
-    args = parser.parse_args()
-
-    header = 'POST / HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 2\r\n\r\n'
-    data = "aa"
-    tcp_client(args.host, args.port, args.sleep, header, data)
-
-
-if __name__ == "__main__":
-    main(sys.argv)
diff --git a/tests/tools/tcp_client.py b/tests/tools/tcp_client.py
index 71f14e7..fba05c4 100644
--- a/tests/tools/tcp_client.py
+++ b/tests/tools/tcp_client.py
@@ -20,13 +20,14 @@ A simple command line interface to send/receive bytes over TCP.
 import argparse
 import socket
 import sys
+import time
 
-
-def tcp_client(host, port, data):
-    pass
+def tcp_client(host, port, data, closeDelaySeconds=0):
     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     s.connect((host, port))
     s.sendall(data.encode())
+    if closeDelaySeconds > 0:
+        time.sleep(closeDelaySeconds)
     s.shutdown(socket.SHUT_WR)
     while True:
         output = s.recv(4096)  # suggested bufsize from docs.python.org
@@ -38,25 +39,31 @@ def tcp_client(host, port, data):
 
 
 DESCRIPTION =\
-    """A simple command line interface to send/receive bytes over TCP.
+    """
+A simple command line interface to send/receive bytes over TCP.
 
 The full contents of the given file are sent via a TCP connection to the given
 host and port. Then data is read from the connection and printed to standard
-output. Streaming is not supported."""
+output. Streaming is not supported.
+
+If a delay is specified, the client will pause before half-closing the
+connection to further writes. This is useful to simulate sending partial data.
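+
+Example invocation (host, port, and file name are illustrative), sending the
+contents of request.txt and waiting two seconds before the half-close:
+
+    python3 tcp_client.py 127.0.0.1 8080 request.txt --delay-after-send 2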
+"""
 
 
 def main(argv):
-    parser = argparse.ArgumentParser(description=DESCRIPTION)
+    parser = argparse.ArgumentParser(description=DESCRIPTION,\
+            formatter_class=argparse.RawDescriptionHelpFormatter)
     parser.add_argument('host', help='the target host')
     parser.add_argument('port', type=int, help='the target port')
     parser.add_argument('file', help='the file with content to be sent')
+    parser.add_argument('--delay-after-send', metavar='SECONDS', type=int, help='after send, delay in seconds before half-close', default=0 )
     args = parser.parse_args()
 
     data = ''
     with open(args.file, 'r') as f:
         data = f.read()
-
-    tcp_client(args.host, args.port, data)
+    tcp_client(args.host, args.port, data, args.delay_after_send)
 
 
 if __name__ == "__main__":
diff --git a/tests/tools/traffic-replay/Config.py b/tests/tools/traffic-replay/Config.py
index 1b97227..48d3fc3 100644
--- a/tests/tools/traffic-replay/Config.py
+++ b/tests/tools/traffic-replay/Config.py
@@ -30,5 +30,5 @@ proxy_nonssl_port = 8080
 nProcess = 4
 nThread = 4
 
-#colorize output
+# colorize output
 colorize = True
diff --git a/tests/tools/traffic-replay/RandomReplay.py b/tests/tools/traffic-replay/RandomReplay.py
index ef912f5..cf241d7 100644
--- a/tests/tools/traffic-replay/RandomReplay.py
+++ b/tests/tools/traffic-replay/RandomReplay.py
@@ -38,6 +38,7 @@ import itertools
 import random
 bSTOP = False
 
+
 def session_replay(input, proxy, result_queue):
     global bSTOP
     ''' Replay all transactions in session 
@@ -55,13 +56,13 @@ def session_replay(input, proxy, result_queue):
             with requests.Session() as request_session:
                 request_session.proxies = proxy
                 for txn in session.getTransactionIter():
-                    type = random.randint(1,1000)
+                    type = random.randint(1, 1000)
                     try:
-                        if type%3 == 0:
+                        if type % 3 == 0:
                             NonSSL.txn_replay(session._filename, txn, proxy, result_queue, request_session)
-                        elif type%3 == 1:
+                        elif type % 3 == 1:
                             SSLReplay.txn_replay(session._filename, txn, proxy, result_queue, request_session)
-                        elif type%3 == 2:
+                        elif type % 3 == 2:
                             h2Replay.txn_replay(session._filename, txn, proxy, result_queue, request_session)
                     except:
                         e = sys.exc_info()
@@ -71,10 +72,11 @@ def session_replay(input, proxy, result_queue):
         input.put('STOP')
         break
 
+
 def client_replay(input, proxy, result_queue, nThread):
     Threads = []
     for i in range(nThread):
-    
+
         t2 = Thread(target=SSLReplay.session_replay, args=[input, proxy, result_queue])
         t = Thread(target=NonSSL.session_replay, args=[input, proxy, result_queue])
         t1 = Thread(target=h2Replay.session_replay, args=[input, proxy, result_queue])
@@ -85,6 +87,5 @@ def client_replay(input, proxy, result_queue, nThread):
         Threads.append(t2)
         Threads.append(t1)
 
-
     for t1 in Threads:
         t1.join()
diff --git a/tests/tools/traffic-replay/WorkerTask.py b/tests/tools/traffic-replay/WorkerTask.py
index 45d4a87..839e696 100644
--- a/tests/tools/traffic-replay/WorkerTask.py
+++ b/tests/tools/traffic-replay/WorkerTask.py
@@ -43,7 +43,7 @@ def worker(input, output, proxy, replay_type, nThread):
         h2Replay.client_replay(input, proxy, output, nThread)
     elif replay_type == 'random':
         RandomReplay.client_replay(input, proxy, output, nThread)
-        
+
         # progress_bar.next()
     # progress_bar.finish()
     print("process{0} has exited".format(current_process().name))
diff --git a/tests/tools/traffic-replay/__main__.py b/tests/tools/traffic-replay/__main__.py
index c19900b..0f6074a 100644
--- a/tests/tools/traffic-replay/__main__.py
+++ b/tests/tools/traffic-replay/__main__.py
@@ -28,15 +28,16 @@ if __name__ == '__main__':
     parser.add_argument("-type", action='store', dest='replay_type', help="Replay type: ssl/random/h2/nossl")
     parser.add_argument("-log_dir", type=str, help="directory of JSON replay files")
     parser.add_argument("-v", dest="verbose", help="verify response status code", action="store_true")
-    parser.add_argument("-host", help="proxy/host to send the requests to",default=Config.proxy_host)
-    parser.add_argument("-port",type=int,help=" The non secure port of ATS to send the request to",default=Config.proxy_nonssl_port)
-    parser.add_argument("-s_port",type=int,help="secure port",default=Config.proxy_ssl_port)
-    parser.add_argument("-ca_cert",help="Certificate to present",default=Config.ca_certs)
-    parser.add_argument("-colorize",type=str,help="specify whether to use colorize the output",default='True')
+    parser.add_argument("-host", help="proxy/host to send the requests to", default=Config.proxy_host)
+    parser.add_argument("-port", type=int, help=" The non secure port of ATS to send the request to",
+                        default=Config.proxy_nonssl_port)
+    parser.add_argument("-s_port", type=int, help="secure port", default=Config.proxy_ssl_port)
+    parser.add_argument("-ca_cert", help="Certificate to present", default=Config.ca_certs)
+    parser.add_argument("-colorize", type=str, help="specify whether to use colorize the output", default='True')
 
     args = parser.parse_args()
 
     # Let 'er loose
     #main(args.log_dir, args.hostname, int(args.port), args.threads, args.timing, args.verbose)
     Config.colorize = True if args.colorize == 'True' else False
-    mainProcess.main(args.log_dir, args.replay_type, args.verbose, pHost = args.host, pNSSLport = args.port, pSSL = args.s_port)
+    mainProcess.main(args.log_dir, args.replay_type, args.verbose, pHost=args.host, pNSSLport=args.port, pSSL=args.s_port)
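
The reformatted block keeps the existing behaviour: the argparse defaults come from Config, and -colorize is compared against the literal string 'True'. A self-contained sketch of that pattern (the Config stand-in below is illustrative; only proxy_nonssl_port and colorize appear in the Config.py hunk above):

    import argparse

    class Config:                      # stand-in for traffic-replay's Config module
        proxy_host = '127.0.0.1'       # illustrative value
        proxy_nonssl_port = 8080
        colorize = True

    parser = argparse.ArgumentParser()
    parser.add_argument("-host", default=Config.proxy_host,
                        help="proxy/host to send the requests to")
    parser.add_argument("-port", type=int, default=Config.proxy_nonssl_port,
                        help="the non-secure port of ATS to send the request to")
    parser.add_argument("-colorize", type=str, default='True',
                        help="whether to colorize the output")

    args = parser.parse_args()
    Config.colorize = (args.colorize == 'True')   # string flag -> bool
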
diff --git a/tests/tools/traffic-replay/mainProcess.py b/tests/tools/traffic-replay/mainProcess.py
index 26a3513..bde8de4 100644
--- a/tests/tools/traffic-replay/mainProcess.py
+++ b/tests/tools/traffic-replay/mainProcess.py
@@ -65,7 +65,7 @@ def check_for_ats(hostname, port):
 # Hopefully this isn't an issue because multi-line headers are deprecated now
 
 
-def main(path, replay_type, Bverbose, pHost = Config.proxy_host, pNSSLport = Config.proxy_nonssl_port, pSSL = Config.proxy_ssl_port):
+def main(path, replay_type, Bverbose, pHost=Config.proxy_host, pNSSLport=Config.proxy_nonssl_port, pSSL=Config.proxy_ssl_port):
     global verbose
     verbose = Bverbose
     check_for_ats(pHost, pNSSLport)
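
main() now takes its proxy host and ports as keyword arguments defaulting to the Config values, and still calls check_for_ats() before starting any replay work. The body of check_for_ats() is not shown in this hunk; judging from its name it verifies that the proxy is reachable, for which a minimal probe might look like (an assumption, not the tool's actual code):

    import socket
    import sys

    def check_for_ats(hostname, port):
        # Assumed behaviour: bail out early if the proxy is not listening.
        try:
            with socket.create_connection((hostname, port), timeout=5):
                pass
        except OSError:
            print("Cannot reach proxy at {0}:{1}".format(hostname, port))
            sys.exit(1)
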
diff --git a/example/thread-pool/include/Makefile.am b/tests/unit_tests/Makefile.am
similarity index 89%
rename from example/thread-pool/include/Makefile.am
rename to tests/unit_tests/Makefile.am
index 7eefc11..e375361 100644
--- a/example/thread-pool/include/Makefile.am
+++ b/tests/unit_tests/Makefile.am
@@ -1,3 +1,4 @@
+#
 #  Licensed to the Apache Software Foundation (ASF) under one
 #  or more contributor license agreements.  See the NOTICE file
 #  distributed with this work for additional information
@@ -14,10 +15,10 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-all:  gen
+AM_CPPFLAGS += \
+  -I$(abs_top_srcdir)
+
+bin_PROGRAMS = unit_tests
 
-gen: gen.c
-	$(CC) -o gen gen.c
+unit_tests_SOURCES = main.cpp
 
-clean-local:
-	rm -f gen gen.o
diff --git a/tools/clang-format.sh b/tools/clang-format.sh
index 6820394..d69d0cd 100755
--- a/tools/clang-format.sh
+++ b/tools/clang-format.sh
@@ -18,65 +18,74 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-set -e # exit on error
-
 # Update the PKGDATE with the new version date when making a new clang-format binary package.
 PKGDATE="20170404"
-DIR=${1:-.}
-ROOT=${ROOT:-$(cd $(dirname $0) && git rev-parse --show-toplevel)/.git/fmt/${PKGDATE}}
-PACKAGE="clang-format-${PKGDATE}.tar.bz2"
-VERSION="clang-format version 4.0.1 (http://llvm.org/git/clang.git 559aa046fe3260d8640791f2249d7b0d458b5700) (http://llvm.org/git/llvm.git 08142cb734b8d2cefec8b1629f6bb170b3f94610)"
 
-URL=${URL:-https://ci.trafficserver.apache.org/bintray/${PACKAGE}}
+function main() {
+  set -e # exit on error
+  ROOT=${ROOT:-$(cd $(dirname $0) && git rev-parse --show-toplevel)/.git/fmt/${PKGDATE}}
 
-TAR=${TAR:-tar}
-CURL=${CURL:-curl}
+  DIR=${1:-.}
+  PACKAGE="clang-format-${PKGDATE}.tar.bz2"
+  VERSION="clang-format version 4.0.1 (http://llvm.org/git/clang.git 559aa046fe3260d8640791f2249d7b0d458b5700) (http://llvm.org/git/llvm.git 08142cb734b8d2cefec8b1629f6bb170b3f94610)"
 
-# default to using native sha1sum command when available
-if [ $(which sha1sum) ] ; then
-  SHASUM=${SHASUM:-sha1sum}
-else
-  SHASUM=${SHASUM:-shasum}
-fi
+  URL=${URL:-https://ci.trafficserver.apache.org/bintray/${PACKAGE}}
+
+  TAR=${TAR:-tar}
+  CURL=${CURL:-curl}
 
-ARCHIVE=$ROOT/$(basename ${URL})
+  # default to using native sha1sum command when available
+  if [ $(which sha1sum) ] ; then
+    SHASUM=${SHASUM:-sha1sum}
+  else
+    SHASUM=${SHASUM:-shasum}
+  fi
 
-case $(uname -s) in
-Darwin)
-  FORMAT=${FORMAT:-${ROOT}/clang-format/clang-format.osx}
-  ;;
-Linux)
-  FORMAT=${FORMAT:-${ROOT}/clang-format/clang-format.linux}
-  ;;
-*)
-  echo "Leif needs to build a clang-format for $(uname -s)"
-  exit 2
-esac
+  ARCHIVE=$ROOT/$(basename ${URL})
 
-mkdir -p ${ROOT}
+  case $(uname -s) in
+  Darwin)
+    FORMAT=${FORMAT:-${ROOT}/clang-format/clang-format.osx}
+    ;;
+  Linux)
+    FORMAT=${FORMAT:-${ROOT}/clang-format/clang-format.linux}
+    ;;
+  *)
+    echo "Leif needs to build a clang-format for $(uname -s)"
+    exit 2
+  esac
 
-# Note that the two spaces between the hash and ${ARCHIVE) is needed
-if [ ! -e ${FORMAT} -o ! -e ${ROOT}/${PACKAGE} ] ; then
-  ${CURL} -L --progress-bar -o ${ARCHIVE} ${URL}
-  ${TAR} -x -C ${ROOT} -f ${ARCHIVE}
-  cat > ${ROOT}/sha1 << EOF
+  mkdir -p ${ROOT}
+
+  # Note that the two spaces between the hash and ${ARCHIVE} are needed
+  if [ ! -e ${FORMAT} -o ! -e ${ROOT}/${PACKAGE} ] ; then
+    ${CURL} -L --progress-bar -o ${ARCHIVE} ${URL}
+    ${TAR} -x -C ${ROOT} -f ${ARCHIVE}
+    cat > ${ROOT}/sha1 << EOF
 ebd00097e5e16d6895d6572638cf354d705f9fcf  ${ARCHIVE}
 EOF
-  ${SHASUM} -c ${ROOT}/sha1
-  chmod +x ${FORMAT}
-fi
+    ${SHASUM} -c ${ROOT}/sha1
+    chmod +x ${FORMAT}
+  fi
+
 
+  # Make sure we only run this with our exact version
+  ver=$(${FORMAT} --version)
+  if [ "$ver" != "$VERSION" ]; then
+      echo "Wrong version of clang-format!"
+      echo "See https://bintray.com/apache/trafficserver/clang-format-tools/view for a newer version,"
+      echo "or alternatively, undefine the FORMAT environment variable"
+      exit 1
+  else
+      for file in $(find $DIR -iname \*.[ch] -o -iname \*.cc); do
+        echo $file
+        ${FORMAT} -i $file
+      done
+  fi
+}
 
-# Make sure we only run this with our exact version
-ver=$(${FORMAT} --version)
-if [ "$ver" != "$VERSION" ]; then
-    echo "Wrong version of clang-format!"
-    echo "See https://bintray.com/apache/trafficserver/clang-format-tools/view for a newer version,"
-    echo "or alternatively, undefine the FORMAT environment variable"
-    exit 1
+if [[ "$(basename -- "$0")" == 'clang-format.sh' ]]; then
+  main "$@"
 else
-    for file in $(find $DIR -iname \*.[ch] -o -iname \*.cc); do
-	echo $file
-	${FORMAT} -i $file
-    done
+  ROOT=${ROOT:-$(git rev-parse --show-toplevel)/.git/fmt/${PKGDATE}}
 fi
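
The script is now wrapped in a main() function that runs only when the file is invoked directly as clang-format.sh; when it is sourced (as the pre-commit hook below does), only the variable definitions take effect. This is the shell counterpart of Python's __main__ guard, sketched here for comparison:

    def main():
        print("formatting sources")    # placeholder for the real work

    if __name__ == "__main__":         # true only when run directly
        main()                         # skipped when this file is imported
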
diff --git a/tools/git/pre-commit b/tools/git/pre-commit
index 3e9eca5..3aee8e7 100755
--- a/tools/git/pre-commit
+++ b/tools/git/pre-commit
@@ -18,7 +18,9 @@
 #  See the License for the specific language governing permissions and
 #  limitations under the License.
 
-ROOT=$(git rev-parse --show-toplevel)/.git/fmt
+GIT_TOP="$(git rev-parse --show-toplevel)"
+source "$GIT_TOP/tools/clang-format.sh"
+
 case $(uname -s) in
 Darwin)
     FORMAT=${FORMAT:-${ROOT}/clang-format/clang-format.osx}
diff --git a/tools/jtest/jtest.cc b/tools/jtest/jtest.cc
index 1e53c2d..c351048 100644
--- a/tools/jtest/jtest.cc
+++ b/tools/jtest/jtest.cc
@@ -548,7 +548,7 @@ max_limit_fd()
   if (getrlimit(RLIMIT_NOFILE, &rl) >= 0) {
 #ifdef OPEN_MAX
     // Darwin
-    rl.rlim_cur = MIN(OPEN_MAX, rl.rlim_max);
+    rl.rlim_cur = std::min(static_cast<rlim_t>(OPEN_MAX), rl.rlim_max);
 #else
     rl.rlim_cur = rl.rlim_max;
 #endif
@@ -2140,7 +2140,7 @@ compose_all_urls(const char *tag, char *buf, char *start, char *end, int buflen,
   char old;
   while ((start = find_href_start(tag, end, buflen - (end - buf)))) {
     char newurl[512];
-    end = (char *)find_href_end(start, MIN(buflen - (start - buf), 512 - 10));
+    end = (char *)find_href_end(start, std::min(static_cast<int>(buflen - (start - buf)), 512 - 10));
     if (!end) {
       end = start + strlen(tag);
       continue;
