Posted to commits@trafficserver.apache.org by zw...@apache.org on 2020/09/03 19:08:57 UTC

[trafficserver] branch 9.0.x updated (0275eff -> 1cef2fd)

This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a change to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git.


    from 0275eff  Fix #7116, skip the insertion of the same continuation to pending dns (#7117)
     new 40e9d41  Convert Mime and URL unit tests in proxy/hdrs to Catch.
     new 53ddfaf  Optimize HTTPHdr conversion of HTTP/1.1 to HTTP/2
     new 998d61e  URL::parse fixes for empty paths (#7119)
     new 01abc6f  Cleanup: Remove useless UDPConnection function
     new 1cef2fd  Replace ACTION_RESULT_NONE with nullptr (#7135)

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../api/functions/TSUrlHostGet.en.rst              |   2 +-
 include/ts/ts.h                                    |   4 +-
 iocore/eventsystem/I_Action.h                      |   2 -
 iocore/net/I_UDPConnection.h                       |   6 +-
 iocore/net/P_UDPConnection.h                       |  50 +--
 iocore/net/P_UnixUDPConnection.h                   |   5 +-
 iocore/net/UnixNetProcessor.cc                     |   2 +-
 iocore/net/UnixUDPConnection.cc                    |   4 +-
 plugins/header_rewrite/operators.cc                |   4 +-
 proxy/hdrs/Makefile.am                             |  17 +-
 proxy/hdrs/URL.cc                                  | 193 ++++-----
 proxy/hdrs/URL.h                                   | 102 ++++-
 proxy/hdrs/test_mime.cc                            | 104 -----
 proxy/hdrs/unit_tests/test_URL.cc                  | 468 +++++++++++++++++++++
 proxy/hdrs/unit_tests/test_mime.cc                 | 249 +++++++++++
 proxy/http/remap/RemapConfig.cc                    |  10 +-
 proxy/http2/HTTP2.cc                               | 231 +++++++---
 proxy/http2/HTTP2.h                                |  16 +-
 proxy/http2/Http2ConnectionState.cc                |  61 ++-
 proxy/http2/Http2Stream.cc                         |   6 +-
 proxy/http2/Makefile.am                            |   1 +
 proxy/http2/unit_tests/test_HTTP2.cc               | 169 ++++++++
 proxy/http3/Http3HeaderFramer.cc                   |  11 +-
 proxy/http3/Http3HeaderFramer.h                    |   2 +-
 tests/gold_tests/autest-site/microDNS.test.ext     |   2 +-
 .../header_rewrite/gold/set-redirect.gold          |   8 +
 ...e_client.test.py => header_rewrite_url.test.py} |  42 +-
 .../rules/{rule.conf => set_redirect.conf}         |   3 +-
 tests/gold_tests/remap/gold/remap-zero-200.gold    |   7 +
 ...{remap_ip_resolve.test.py => regex_map.test.py} |  48 +--
 30 files changed, 1381 insertions(+), 448 deletions(-)
 delete mode 100644 proxy/hdrs/test_mime.cc
 create mode 100644 proxy/hdrs/unit_tests/test_URL.cc
 create mode 100644 proxy/hdrs/unit_tests/test_mime.cc
 create mode 100644 proxy/http2/unit_tests/test_HTTP2.cc
 create mode 100644 tests/gold_tests/pluginTest/header_rewrite/gold/set-redirect.gold
 rename tests/gold_tests/pluginTest/header_rewrite/{header_rewrite_client.test.py => header_rewrite_url.test.py} (58%)
 copy tests/gold_tests/pluginTest/header_rewrite/rules/{rule.conf => set_redirect.conf} (95%)
 create mode 100644 tests/gold_tests/remap/gold/remap-zero-200.gold
 copy tests/gold_tests/remap/{remap_ip_resolve.test.py => regex_map.test.py} (54%)


[trafficserver] 05/05: Replace ACTION_RESULT_NONE with nullptr (#7135)

Posted by zw...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 1cef2fd442cd6089f50d19f8889a7b4a9c390780
Author: Susan Hinrichs <sh...@yahoo-inc.com>
AuthorDate: Tue Sep 1 09:55:07 2020 -0500

    Replace ACTION_RESULT_NONE with nullptr (#7135)
    
    (cherry picked from commit 720cd0c120be28505e85db37f1f5cf489f379934)
---
 iocore/eventsystem/I_Action.h    | 2 --
 iocore/net/I_UDPConnection.h     | 2 +-
 iocore/net/P_UnixUDPConnection.h | 2 +-
 iocore/net/UnixNetProcessor.cc   | 2 +-
 iocore/net/UnixUDPConnection.cc  | 4 ++--
 5 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/iocore/eventsystem/I_Action.h b/iocore/eventsystem/I_Action.h
index edd9159..a0b500e 100644
--- a/iocore/eventsystem/I_Action.h
+++ b/iocore/eventsystem/I_Action.h
@@ -193,10 +193,8 @@ public:
   virtual ~Action() {}
 };
 
-#define ACTION_RESULT_NONE MAKE_ACTION_RESULT(0)
 #define ACTION_RESULT_DONE MAKE_ACTION_RESULT(1)
 #define ACTION_IO_ERROR MAKE_ACTION_RESULT(2)
-#define ACTION_RESULT_INLINE MAKE_ACTION_RESULT(3)
 
 // Use these classes by
 // #define ACTION_RESULT_HOST_DB_OFFLINE
diff --git a/iocore/net/I_UDPConnection.h b/iocore/net/I_UDPConnection.h
index 4a17973..5156eb8 100644
--- a/iocore/net/I_UDPConnection.h
+++ b/iocore/net/I_UDPConnection.h
@@ -73,7 +73,7 @@ public:
      <br>
      cont->handleEvent(NET_EVENT_DATAGRAM_READ_READY, Queue&lt;UDPPacketInternal&gt; *) on incoming packets.
 
-     @return Action* Always returns ACTION_RESULT_NONE.  Can't be
+     @return Action* Always returns nullptr.  Can't be
      cancelled via this Action.
      @param c continuation to be called back
    */
diff --git a/iocore/net/P_UnixUDPConnection.h b/iocore/net/P_UnixUDPConnection.h
index 37ad8bd..62b47f6 100644
--- a/iocore/net/P_UnixUDPConnection.h
+++ b/iocore/net/P_UnixUDPConnection.h
@@ -98,7 +98,7 @@ UDPConnection::recv(Continuation *c)
   p->continuation = c;
   ink_assert(c != nullptr);
   mutex = c->mutex;
-  return ACTION_RESULT_NONE;
+  return nullptr;
 }
 
 TS_INLINE UDPConnection *
diff --git a/iocore/net/UnixNetProcessor.cc b/iocore/net/UnixNetProcessor.cc
index c8bc1e8..5d8c71a 100644
--- a/iocore/net/UnixNetProcessor.cc
+++ b/iocore/net/UnixNetProcessor.cc
@@ -181,7 +181,7 @@ Action *
 UnixNetProcessor::connect_re_internal(Continuation *cont, sockaddr const *target, NetVCOptions *opt)
 {
   if (TSSystemState::is_event_system_shut_down()) {
-    return ACTION_RESULT_NONE;
+    return nullptr;
   }
   EThread *t             = eventProcessor.assign_affinity_by_type(cont, opt->etype);
   UnixNetVConnection *vc = (UnixNetVConnection *)this->allocate_vc(t);
diff --git a/iocore/net/UnixUDPConnection.cc b/iocore/net/UnixUDPConnection.cc
index 4d1eaf3..57c8814 100644
--- a/iocore/net/UnixUDPConnection.cc
+++ b/iocore/net/UnixUDPConnection.cc
@@ -121,7 +121,7 @@ UDPConnection::send(Continuation *c, UDPPacket *xp)
   if (shouldDestroy()) {
     ink_assert(!"freeing packet sent on dead connection");
     p->free();
-    return ACTION_RESULT_NONE;
+    return nullptr;
   }
 
   ink_assert(mutex == c->mutex);
@@ -132,7 +132,7 @@ UDPConnection::send(Continuation *c, UDPPacket *xp)
   mutex               = c->mutex;
   p->reqGenerationNum = conn->sendGenerationNum;
   get_UDPNetHandler(conn->ethread)->udpOutQueue.send(p);
-  return ACTION_RESULT_NONE;
+  return nullptr;
 }
 
 void
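
For illustration, a hypothetical caller sketch (the names "udp_conn" and "cont"
are placeholders, not from this commit); the contract it relies on is the one
stated in the updated I_UDPConnection.h comment above:

    // With ACTION_RESULT_NONE removed, "nothing to cancel" is signalled by a
    // plain null Action pointer.
    Action *a = udp_conn->recv(cont);
    if (a == nullptr) {
      // No cancellable Action is handed back; the continuation is called back
      // directly with NET_EVENT_DATAGRAM_READ_READY as packets arrive.
    }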


[trafficserver] 04/05: Cleanup: Remove useless UDPConnection function

Posted by zw...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 01abc6f5d60524562692ef98c809ddc526b5da90
Author: scw00 <sc...@apache.org>
AuthorDate: Tue Nov 19 09:18:53 2019 +0800

    Cleanup: Remove useless UDPConnection function
    
    (cherry picked from commit 0ec8ca4baa933fb0484e7767a0864fb5522b6734)
---
 iocore/net/I_UDPConnection.h     |  4 +---
 iocore/net/P_UDPConnection.h     | 50 +++++++++-------------------------------
 iocore/net/P_UnixUDPConnection.h |  3 +--
 3 files changed, 13 insertions(+), 44 deletions(-)

diff --git a/iocore/net/I_UDPConnection.h b/iocore/net/I_UDPConnection.h
index d06920f..4a17973 100644
--- a/iocore/net/I_UDPConnection.h
+++ b/iocore/net/I_UDPConnection.h
@@ -49,7 +49,7 @@ public:
   SOCKET getFd();
   void setBinding(struct sockaddr const *);
   void setBinding(const IpAddr &, in_port_t);
-  inkcoreapi int getBinding(struct sockaddr *);
+  inkcoreapi bool getBinding(struct sockaddr *);
 
   void destroy();
   int shouldDestroy();
@@ -86,8 +86,6 @@ public:
   int getPortNum();
 
   int GetSendGenerationNumber(); // const
-  void SetLastSentPktTSSeqNum(int64_t sentSeqNum);
-  int64_t cancel();
   void setContinuation(Continuation *c);
 
   /**
diff --git a/iocore/net/P_UDPConnection.h b/iocore/net/P_UDPConnection.h
index ef69b31..10e57c2 100644
--- a/iocore/net/P_UDPConnection.h
+++ b/iocore/net/P_UDPConnection.h
@@ -35,19 +35,17 @@
 class UDPConnectionInternal : public UDPConnection
 {
 public:
-  UDPConnectionInternal();
+  UDPConnectionInternal() = default;
   ~UDPConnectionInternal() override;
 
   Continuation *continuation = nullptr;
-  int recvActive             = 0; // interested in receiving
   int refcount               = 0; // public for assertion
 
-  SOCKET fd;
-  IpEndpoint binding;
-  int binding_valid = 0;
-  int tobedestroyed = 0;
-  int sendGenerationNum;
-  int64_t lastSentPktTSSeqNum;
+  SOCKET fd = -1;
+  IpEndpoint binding{};
+  bool binding_valid    = false;
+  int tobedestroyed     = 0;
+  int sendGenerationNum = 0;
 
   // this is for doing packet scheduling: we keep two values so that we can
   // implement cancel.  The first value tracks the startTime of the last
@@ -55,21 +53,11 @@ public:
   // startTime of the last packet when we are doing scheduling;  whenever the
   // associated continuation cancels a packet, we rest lastPktStartTime to be
   // the same as the lastSentPktStartTime.
-  uint64_t lastSentPktStartTime;
-  uint64_t lastPktStartTime;
+  uint64_t lastSentPktStartTime = 0;
+  uint64_t lastPktStartTime     = 0;
 };
 
 TS_INLINE
-UDPConnectionInternal::UDPConnectionInternal() : fd(-1)
-{
-  sendGenerationNum    = 0;
-  lastSentPktTSSeqNum  = -1;
-  lastSentPktStartTime = 0;
-  lastPktStartTime     = 0;
-  memset(&binding, 0, sizeof binding);
-}
-
-TS_INLINE
 UDPConnectionInternal::~UDPConnectionInternal()
 {
   continuation = nullptr;
@@ -87,7 +75,7 @@ UDPConnection::setBinding(struct sockaddr const *s)
 {
   UDPConnectionInternal *p = static_cast<UDPConnectionInternal *>(this);
   ats_ip_copy(&p->binding, s);
-  p->binding_valid = 1;
+  p->binding_valid = true;
 }
 
 TS_INLINE void
@@ -97,10 +85,10 @@ UDPConnection::setBinding(IpAddr const &ip, in_port_t port)
   IpEndpoint addr;
   addr.assign(ip, htons(port));
   ats_ip_copy(&p->binding, addr);
-  p->binding_valid = 1;
+  p->binding_valid = true;
 }
 
-TS_INLINE int
+TS_INLINE bool
 UDPConnection::getBinding(struct sockaddr *s)
 {
   UDPConnectionInternal *p = static_cast<UDPConnectionInternal *>(this);
@@ -144,22 +132,6 @@ UDPConnection::getPortNum()
   return ats_ip_port_host_order(&static_cast<UDPConnectionInternal *>(this)->binding);
 }
 
-TS_INLINE int64_t
-UDPConnection::cancel()
-{
-  UDPConnectionInternal *p = static_cast<UDPConnectionInternal *>(this);
-
-  p->sendGenerationNum++;
-  p->lastPktStartTime = p->lastSentPktStartTime;
-  return p->lastSentPktTSSeqNum;
-}
-
-TS_INLINE void
-UDPConnection::SetLastSentPktTSSeqNum(int64_t sentSeqNum)
-{
-  static_cast<UDPConnectionInternal *>(this)->lastSentPktTSSeqNum = sentSeqNum;
-}
-
 TS_INLINE void
 UDPConnection::setContinuation(Continuation *c)
 {
diff --git a/iocore/net/P_UnixUDPConnection.h b/iocore/net/P_UnixUDPConnection.h
index 9e3c2f2..37ad8bd 100644
--- a/iocore/net/P_UnixUDPConnection.h
+++ b/iocore/net/P_UnixUDPConnection.h
@@ -97,8 +97,7 @@ UDPConnection::recv(Continuation *c)
   // register callback interest.
   p->continuation = c;
   ink_assert(c != nullptr);
-  mutex         = c->mutex;
-  p->recvActive = 1;
+  mutex = c->mutex;
   return ACTION_RESULT_NONE;
 }
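
As a standalone illustration (plain C++, not ATS code) of the initialization
style this cleanup adopts: in-class default member initializers replace the
hand-written constructor and the memset() of the binding structure, so the
constructor can simply be declared "= default".

    #include <cstdint>
    #include <sys/socket.h>

    // Minimal stand-in for the pattern used in P_UDPConnection.h above.
    struct ExampleUDPConn {
      ExampleUDPConn() = default;       // replaces the old out-of-line constructor

      int fd                   = -1;    // was: fd(-1) in the ctor initializer list
      sockaddr_storage binding = {};    // was: memset(&binding, 0, sizeof binding)
      bool binding_valid       = false; // was: int binding_valid = 0
      int send_generation_num  = 0;
      uint64_t last_pkt_start  = 0;
    };

    int main()
    {
      ExampleUDPConn c; // every member is deterministically initialized
      return c.fd == -1 ? 0 : 1;
    }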
 


[trafficserver] 02/05: Optimize HTTPHdr conversion of HTTP/1.1 to HTTP/2

Posted by zw...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 53ddfaf8e0e3e0efb0151b28f4fe218bae7d4963
Author: Masaori Koshiba <ma...@apache.org>
AuthorDate: Tue Mar 24 08:45:01 2020 +0900

    Optimize HTTPHdr conversion of HTTP/1.1 to HTTP/2
    
    Avoid extra HdrHeap allocation and copy all headers.
    
    (cherry picked from commit 3b0716d9f61ad276beab2d6d7a74d804b5b52cd5)
---
 proxy/http2/HTTP2.cc                 | 231 ++++++++++++++++++++++++-----------
 proxy/http2/HTTP2.h                  |  16 ++-
 proxy/http2/Http2ConnectionState.cc  |  61 ++++-----
 proxy/http2/Http2Stream.cc           |   6 +-
 proxy/http2/Makefile.am              |   1 +
 proxy/http2/unit_tests/test_HTTP2.cc | 169 +++++++++++++++++++++++++
 proxy/http3/Http3HeaderFramer.cc     |  11 +-
 proxy/http3/Http3HeaderFramer.h      |   2 +-
 8 files changed, 385 insertions(+), 112 deletions(-)

diff --git a/proxy/http2/HTTP2.cc b/proxy/http2/HTTP2.cc
index 6616f03..bb4b4df 100644
--- a/proxy/http2/HTTP2.cc
+++ b/proxy/http2/HTTP2.cc
@@ -45,6 +45,16 @@ const unsigned HTTP2_LEN_STATUS    = countof(":status") - 1;
 static size_t HTTP2_LEN_STATUS_VALUE_STR         = 3;
 static const uint32_t HTTP2_MAX_TABLE_SIZE_LIMIT = 64 * 1024;
 
+namespace
+{
+struct Http2HeaderName {
+  const char *name = nullptr;
+  int name_len     = 0;
+};
+
+Http2HeaderName http2_connection_specific_headers[5] = {};
+} // namespace
+
 // Statistics
 RecRawStatBlock *http2_rsb;
 static const char *const HTTP2_STAT_CURRENT_CLIENT_CONNECTION_NAME        = "proxy.process.http2.current_client_connections";
@@ -501,87 +511,146 @@ http2_convert_header_from_2_to_1_1(HTTPHdr *headers)
   return PARSE_RESULT_DONE;
 }
 
+/**
+  Initialize HTTPHdr for HTTP/2
+
+  Reserve HTTP/2 Pseudo-Header Fields in front of HTTPHdr. Value of these header fields will be set by
+  `http2_convert_header_from_1_1_to_2()`. When a HTTPHdr for HTTP/2 headers is created, this should be called immediately.
+  Because all pseudo-header fields MUST appear in the header block before regular header fields.
+ */
 void
-http2_generate_h2_header_from_1_1(HTTPHdr *headers, HTTPHdr *h2_headers)
+http2_init_pseudo_headers(HTTPHdr &hdr)
 {
-  h2_headers->create(http_hdr_type_get(headers->m_http));
+  switch (http_hdr_type_get(hdr.m_http)) {
+  case HTTP_TYPE_REQUEST: {
+    MIMEField *method = hdr.field_create(HTTP2_VALUE_METHOD, HTTP2_LEN_METHOD);
+    hdr.field_attach(method);
+
+    MIMEField *scheme = hdr.field_create(HTTP2_VALUE_SCHEME, HTTP2_LEN_SCHEME);
+    hdr.field_attach(scheme);
+
+    MIMEField *authority = hdr.field_create(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY);
+    hdr.field_attach(authority);
+
+    MIMEField *path = hdr.field_create(HTTP2_VALUE_PATH, HTTP2_LEN_PATH);
+    hdr.field_attach(path);
+
+    break;
+  }
+  case HTTP_TYPE_RESPONSE: {
+    MIMEField *status = hdr.field_create(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS);
+    hdr.field_attach(status);
+
+    break;
+  }
+  default:
+    ink_abort("HTTP_TYPE_UNKNOWN");
+  }
+}
+
+/**
+  Convert HTTP/1.1 HTTPHdr to HTTP/2
+
+  Assuming HTTP/2 Pseudo-Header Fields are reserved by `http2_init_pseudo_headers()`.
+ */
+ParseResult
+http2_convert_header_from_1_1_to_2(HTTPHdr *headers)
+{
+  switch (http_hdr_type_get(headers->m_http)) {
+  case HTTP_TYPE_REQUEST: {
+    // :method
+    if (MIMEField *field = headers->field_find(HTTP2_VALUE_METHOD, HTTP2_LEN_METHOD); field != nullptr) {
+      int value_len;
+      const char *value = headers->method_get(&value_len);
+
+      field->value_set(headers->m_heap, headers->m_mime, value, value_len);
+    } else {
+      ink_abort("initialize HTTP/2 pseudo-headers");
+      return PARSE_RESULT_ERROR;
+    }
+
+    // :scheme
+    if (MIMEField *field = headers->field_find(HTTP2_VALUE_SCHEME, HTTP2_LEN_SCHEME); field != nullptr) {
+      int value_len;
+      const char *value = headers->scheme_get(&value_len);
+
+      if (value != nullptr) {
+        field->value_set(headers->m_heap, headers->m_mime, value, value_len);
+      } else {
+        field->value_set(headers->m_heap, headers->m_mime, URL_SCHEME_HTTPS, URL_LEN_HTTPS);
+      }
+    } else {
+      ink_abort("initialize HTTP/2 pseudo-headers");
+      return PARSE_RESULT_ERROR;
+    }
+
+    // :authority
+    if (MIMEField *field = headers->field_find(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY); field != nullptr) {
+      int value_len;
+      const char *value = headers->host_get(&value_len);
+
+      if (headers->is_port_in_header()) {
+        int port            = headers->port_get();
+        char *host_and_port = static_cast<char *>(ats_malloc(value_len + 8));
+        value_len           = snprintf(host_and_port, value_len + 8, "%.*s:%d", value_len, value, port);
+
+        field->value_set(headers->m_heap, headers->m_mime, host_and_port, value_len);
+        ats_free(host_and_port);
+      } else {
+        field->value_set(headers->m_heap, headers->m_mime, value, value_len);
+      }
+    } else {
+      ink_abort("initialize HTTP/2 pseudo-headers");
+      return PARSE_RESULT_ERROR;
+    }
 
-  if (http_hdr_type_get(headers->m_http) == HTTP_TYPE_RESPONSE) {
-    // Add ':status' header field
-    char status_str[HTTP2_LEN_STATUS_VALUE_STR + 1];
-    snprintf(status_str, sizeof(status_str), "%d", headers->status_get());
-    MIMEField *status_field = h2_headers->field_create(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS);
-    status_field->value_set(h2_headers->m_heap, h2_headers->m_mime, status_str, HTTP2_LEN_STATUS_VALUE_STR);
-    h2_headers->field_attach(status_field);
+    // :path
+    if (MIMEField *field = headers->field_find(HTTP2_VALUE_PATH, HTTP2_LEN_PATH); field != nullptr) {
+      int value_len;
+      const char *value = headers->path_get(&value_len);
+      char *path        = static_cast<char *>(ats_malloc(value_len + 1));
+      path[0]           = '/';
+      memcpy(path + 1, value, value_len);
 
-  } else if (http_hdr_type_get(headers->m_http) == HTTP_TYPE_REQUEST) {
-    MIMEField *field;
-    const char *value;
-    int value_len;
+      field->value_set(headers->m_heap, headers->m_mime, path, value_len + 1);
+      ats_free(path);
+    } else {
+      ink_abort("initialize HTTP/2 pseudo-headers");
+      return PARSE_RESULT_ERROR;
+    }
 
-    // Add ':authority' header field
     // TODO: remove host/Host header
-    // [RFC 7540] 8.1.2.3. Clients that generate HTTP/2 requests directly SHOULD use the ":authority" pseudo-header field instead of
-    // the Host header field.
-    field = h2_headers->field_create(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY);
-    value = headers->host_get(&value_len);
-    if (headers->is_port_in_header()) {
-      int port            = headers->port_get();
-      char *host_and_port = static_cast<char *>(ats_malloc(value_len + 8));
-      value_len           = snprintf(host_and_port, value_len + 8, "%.*s:%d", value_len, value, port);
-      field->value_set(h2_headers->m_heap, h2_headers->m_mime, host_and_port, value_len);
-      ats_free(host_and_port);
+    // [RFC 7540] 8.1.2.3. Clients that generate HTTP/2 requests directly SHOULD use the ":authority" pseudo-header field instead
+    // of the Host header field.
+
+    break;
+  }
+  case HTTP_TYPE_RESPONSE: {
+    // :status
+    if (MIMEField *field = headers->field_find(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS); field != nullptr) {
+      // ink_small_itoa() requires 5+ buffer length
+      char status_str[HTTP2_LEN_STATUS_VALUE_STR + 3];
+      mime_format_int(status_str, headers->status_get(), sizeof(status_str));
+
+      field->value_set(headers->m_heap, headers->m_mime, status_str, HTTP2_LEN_STATUS_VALUE_STR);
     } else {
-      field->value_set(h2_headers->m_heap, h2_headers->m_mime, value, value_len);
+      ink_abort("initialize HTTP/2 pseudo-headers");
+      return PARSE_RESULT_ERROR;
     }
-    h2_headers->field_attach(field);
-
-    // Add ':method' header field
-    field = h2_headers->field_create(HTTP2_VALUE_METHOD, HTTP2_LEN_METHOD);
-    value = headers->method_get(&value_len);
-    field->value_set(h2_headers->m_heap, h2_headers->m_mime, value, value_len);
-    h2_headers->field_attach(field);
-
-    // Add ':path' header field
-    field      = h2_headers->field_create(HTTP2_VALUE_PATH, HTTP2_LEN_PATH);
-    value      = headers->path_get(&value_len);
-    char *path = static_cast<char *>(ats_malloc(value_len + 1));
-    path[0]    = '/';
-    memcpy(path + 1, value, value_len);
-    field->value_set(h2_headers->m_heap, h2_headers->m_mime, path, value_len + 1);
-    ats_free(path);
-    h2_headers->field_attach(field);
-
-    // Add ':scheme' header field
-    field = h2_headers->field_create(HTTP2_VALUE_SCHEME, HTTP2_LEN_SCHEME);
-    value = headers->scheme_get(&value_len);
-    field->value_set(h2_headers->m_heap, h2_headers->m_mime, value, value_len);
-    h2_headers->field_attach(field);
-  }
-
-  // Copy headers
+    break;
+  }
+  default:
+    ink_abort("HTTP_TYPE_UNKNOWN");
+  }
+
   // Intermediaries SHOULD remove connection-specific header fields.
-  MIMEFieldIter field_iter;
-  for (MIMEField *field = headers->iter_get_first(&field_iter); field != nullptr; field = headers->iter_get_next(&field_iter)) {
-    const char *name;
-    int name_len;
-    const char *value;
-    int value_len;
-    name = field->name_get(&name_len);
-    if ((name_len == MIME_LEN_CONNECTION && strncasecmp(name, MIME_FIELD_CONNECTION, name_len) == 0) ||
-        (name_len == MIME_LEN_KEEP_ALIVE && strncasecmp(name, MIME_FIELD_KEEP_ALIVE, name_len) == 0) ||
-        (name_len == MIME_LEN_PROXY_CONNECTION && strncasecmp(name, MIME_FIELD_PROXY_CONNECTION, name_len) == 0) ||
-        (name_len == MIME_LEN_TRANSFER_ENCODING && strncasecmp(name, MIME_FIELD_TRANSFER_ENCODING, name_len) == 0) ||
-        (name_len == MIME_LEN_UPGRADE && strncasecmp(name, MIME_FIELD_UPGRADE, name_len) == 0)) {
-      continue;
+  for (auto &h : http2_connection_specific_headers) {
+    if (MIMEField *field = headers->field_find(h.name, h.name_len); field != nullptr) {
+      headers->field_delete(field);
     }
-    MIMEField *newfield;
-    name     = field->name_get(&name_len);
-    newfield = h2_headers->field_create(name, name_len);
-    value    = field->value_get(&value_len);
-    newfield->value_set(h2_headers->m_heap, h2_headers->m_mime, value, value_len);
-    h2_headers->field_attach(newfield);
   }
+
+  return PARSE_RESULT_DONE;
 }
 
 Http2ErrorCode
@@ -595,6 +664,7 @@ http2_encode_header_blocks(HTTPHdr *in, uint8_t *out, uint32_t out_len, uint32_t
   if (maximum_table_size == hpack_get_maximum_table_size(handle)) {
     maximum_table_size = -1;
   }
+
   // TODO: It would be better to split Cookie header value
   int64_t result = hpack_encode_header_block(handle, out, out_len, in, maximum_table_size);
   if (result < 0) {
@@ -826,6 +896,27 @@ Http2::init()
                      static_cast<int>(HTTP2_STAT_MAX_PRIORITY_FRAMES_PER_MINUTE_EXCEEDED), RecRawStatSyncSum);
   RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE_NAME, RECD_INT, RECP_PERSISTENT,
                      static_cast<int>(HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE), RecRawStatSyncSum);
+
+  http2_init();
+}
+
+/**
+  mime_init() needs to be called
+ */
+void
+http2_init()
+{
+  ink_assert(MIME_FIELD_CONNECTION != nullptr);
+  ink_assert(MIME_FIELD_KEEP_ALIVE != nullptr);
+  ink_assert(MIME_FIELD_PROXY_CONNECTION != nullptr);
+  ink_assert(MIME_FIELD_TRANSFER_ENCODING != nullptr);
+  ink_assert(MIME_FIELD_UPGRADE != nullptr);
+
+  http2_connection_specific_headers[0] = {MIME_FIELD_CONNECTION, MIME_LEN_CONNECTION};
+  http2_connection_specific_headers[1] = {MIME_FIELD_KEEP_ALIVE, MIME_LEN_KEEP_ALIVE};
+  http2_connection_specific_headers[2] = {MIME_FIELD_PROXY_CONNECTION, MIME_LEN_PROXY_CONNECTION};
+  http2_connection_specific_headers[3] = {MIME_FIELD_TRANSFER_ENCODING, MIME_LEN_TRANSFER_ENCODING};
+  http2_connection_specific_headers[4] = {MIME_FIELD_UPGRADE, MIME_LEN_UPGRADE};
 }
 
 #if TS_HAS_TESTS
diff --git a/proxy/http2/HTTP2.h b/proxy/http2/HTTP2.h
index 7bcc75f..9661645 100644
--- a/proxy/http2/HTTP2.h
+++ b/proxy/http2/HTTP2.h
@@ -43,6 +43,18 @@ typedef int32_t Http2WindowSize;
 extern const char *const HTTP2_CONNECTION_PREFACE;
 const size_t HTTP2_CONNECTION_PREFACE_LEN = 24;
 
+extern const char *HTTP2_VALUE_SCHEME;
+extern const char *HTTP2_VALUE_METHOD;
+extern const char *HTTP2_VALUE_AUTHORITY;
+extern const char *HTTP2_VALUE_PATH;
+extern const char *HTTP2_VALUE_STATUS;
+
+extern const unsigned HTTP2_LEN_SCHEME;
+extern const unsigned HTTP2_LEN_METHOD;
+extern const unsigned HTTP2_LEN_AUTHORITY;
+extern const unsigned HTTP2_LEN_PATH;
+extern const unsigned HTTP2_LEN_STATUS;
+
 const size_t HTTP2_FRAME_HEADER_LEN       = 9;
 const size_t HTTP2_DATA_PADLEN_LEN        = 1;
 const size_t HTTP2_HEADERS_PADLEN_LEN     = 1;
@@ -355,7 +367,9 @@ Http2ErrorCode http2_decode_header_blocks(HTTPHdr *, const uint8_t *, const uint
 Http2ErrorCode http2_encode_header_blocks(HTTPHdr *, uint8_t *, uint32_t, uint32_t *, HpackHandle &, int32_t);
 
 ParseResult http2_convert_header_from_2_to_1_1(HTTPHdr *);
-void http2_generate_h2_header_from_1_1(HTTPHdr *headers, HTTPHdr *h2_headers);
+ParseResult http2_convert_header_from_1_1_to_2(HTTPHdr *);
+void http2_init_pseudo_headers(HTTPHdr &);
+void http2_init();
 
 // Not sure where else to put this, but figure this is as good of a start as
 // anything else.
diff --git a/proxy/http2/Http2ConnectionState.cc b/proxy/http2/Http2ConnectionState.cc
index e295318..23beaff 100644
--- a/proxy/http2/Http2ConnectionState.cc
+++ b/proxy/http2/Http2ConnectionState.cc
@@ -28,6 +28,9 @@
 #include "Http2Frame.h"
 #include "Http2DebugNames.h"
 #include "HttpDebugNames.h"
+
+#include "tscpp/util/PostScript.h"
+
 #include <sstream>
 #include <numeric>
 
@@ -1633,25 +1636,22 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
   uint32_t header_blocks_size = 0;
   int payload_length          = 0;
   uint8_t flags               = 0x00;
-  HTTPHdr *resp_header        = &stream->response_header;
 
   Http2StreamDebug(ua_session, stream->get_id(), "Send HEADERS frame");
 
-  HTTPHdr h2_hdr;
-  http2_generate_h2_header_from_1_1(resp_header, &h2_hdr);
+  HTTPHdr *resp_hdr = &stream->response_header;
+  http2_convert_header_from_1_1_to_2(resp_hdr);
 
-  buf_len = resp_header->length_get() * 2; // Make it double just in case
+  buf_len = resp_hdr->length_get() * 2; // Make it double just in case
   buf     = static_cast<uint8_t *>(ats_malloc(buf_len));
   if (buf == nullptr) {
-    h2_hdr.destroy();
     return;
   }
 
   stream->mark_milestone(Http2StreamMilestone::START_ENCODE_HEADERS);
-  Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle),
+  Http2ErrorCode result = http2_encode_header_blocks(resp_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle),
                                                      client_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE));
   if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) {
-    h2_hdr.destroy();
     ats_free(buf);
     return;
   }
@@ -1660,8 +1660,8 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
   if (header_blocks_size <= static_cast<uint32_t>(BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_HEADERS]))) {
     payload_length = header_blocks_size;
     flags |= HTTP2_FLAGS_HEADERS_END_HEADERS;
-    if ((h2_hdr.presence(MIME_PRESENCE_CONTENT_LENGTH) && h2_hdr.get_content_length() == 0) ||
-        (!resp_header->expect_final_response() && stream->is_write_vio_done())) {
+    if ((resp_hdr->presence(MIME_PRESENCE_CONTENT_LENGTH) && resp_hdr->get_content_length() == 0) ||
+        (!resp_hdr->expect_final_response() && stream->is_write_vio_done())) {
       Http2StreamDebug(ua_session, stream->get_id(), "END_STREAM");
       flags |= HTTP2_FLAGS_HEADERS_END_STREAM;
       stream->send_end_stream = true;
@@ -1679,7 +1679,6 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
       fini_event = this_ethread()->schedule_imm_local((Continuation *)this, HTTP2_SESSION_EVENT_FINI);
     }
 
-    h2_hdr.destroy();
     ats_free(buf);
     return;
   }
@@ -1704,14 +1703,12 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream)
     sent += payload_length;
   }
 
-  h2_hdr.destroy();
   ats_free(buf);
 }
 
 bool
 Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url, const MIMEField *accept_encoding)
 {
-  HTTPHdr h1_hdr, h2_hdr;
   uint8_t *buf                = nullptr;
   uint32_t buf_len            = 0;
   uint32_t header_blocks_size = 0;
@@ -1724,37 +1721,35 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url, con
 
   Http2StreamDebug(ua_session, stream->get_id(), "Send PUSH_PROMISE frame");
 
-  h1_hdr.create(HTTP_TYPE_REQUEST);
-  h1_hdr.url_set(&url);
-  h1_hdr.method_set("GET", 3);
+  HTTPHdr hdr;
+  ts::PostScript hdr_defer([&]() -> void { hdr.destroy(); });
+  hdr.create(HTTP_TYPE_REQUEST);
+  http2_init_pseudo_headers(hdr);
+  hdr.url_set(&url);
+  hdr.method_set(HTTP_METHOD_GET, HTTP_LEN_GET);
+
   if (accept_encoding != nullptr) {
-    MIMEField *f;
-    const char *name;
     int name_len;
-    const char *value;
-    int value_len;
+    const char *name = accept_encoding->name_get(&name_len);
+    MIMEField *f     = hdr.field_create(name, name_len);
 
-    name  = accept_encoding->name_get(&name_len);
-    f     = h1_hdr.field_create(name, name_len);
-    value = accept_encoding->value_get(&value_len);
-    f->value_set(h1_hdr.m_heap, h1_hdr.m_mime, value, value_len);
+    int value_len;
+    const char *value = accept_encoding->value_get(&value_len);
+    f->value_set(hdr.m_heap, hdr.m_mime, value, value_len);
 
-    h1_hdr.field_attach(f);
+    hdr.field_attach(f);
   }
 
-  http2_generate_h2_header_from_1_1(&h1_hdr, &h2_hdr);
+  http2_convert_header_from_1_1_to_2(&hdr);
 
-  buf_len = h1_hdr.length_get() * 2; // Make it double just in case
-  h1_hdr.destroy();
-  buf = static_cast<uint8_t *>(ats_malloc(buf_len));
+  buf_len = hdr.length_get() * 2; // Make it double just in case
+  buf     = static_cast<uint8_t *>(ats_malloc(buf_len));
   if (buf == nullptr) {
-    h2_hdr.destroy();
     return false;
   }
-  Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle),
+  Http2ErrorCode result = http2_encode_header_blocks(&hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle),
                                                      client_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE));
   if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) {
-    h2_hdr.destroy();
     ats_free(buf);
     return false;
   }
@@ -1796,7 +1791,6 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url, con
   Http2Error error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE);
   stream = this->create_stream(id, error);
   if (!stream) {
-    h2_hdr.destroy();
     return false;
   }
 
@@ -1815,11 +1809,10 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url, con
     }
   }
   stream->change_state(HTTP2_FRAME_TYPE_PUSH_PROMISE, HTTP2_FLAGS_PUSH_PROMISE_END_HEADERS);
-  stream->set_request_headers(h2_hdr);
+  stream->set_request_headers(hdr);
   stream->new_transaction();
   stream->send_request(*this);
 
-  h2_hdr.destroy();
   return true;
 }
 
diff --git a/proxy/http2/Http2Stream.cc b/proxy/http2/Http2Stream.cc
index ddf887c..eef4460 100644
--- a/proxy/http2/Http2Stream.cc
+++ b/proxy/http2/Http2Stream.cc
@@ -54,9 +54,12 @@ Http2Stream::init(Http2StreamId sid, ssize_t initial_rwnd)
   this->_server_rwnd = Http2::initial_window_size;
 
   this->_reader = this->_request_buffer.alloc_reader();
-  // FIXME: Are you sure? every "stream" needs request_header?
+
   _req_header.create(HTTP_TYPE_REQUEST);
   response_header.create(HTTP_TYPE_RESPONSE);
+  // TODO: init _req_header instead of response_header if this Http2Stream is outgoing
+  http2_init_pseudo_headers(response_header);
+
   http_parser_init(&http_parser);
 }
 
@@ -624,6 +627,7 @@ Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len,
         this->response_header_done = false;
         response_header.destroy();
         response_header.create(HTTP_TYPE_RESPONSE);
+        http2_init_pseudo_headers(response_header);
         http_parser_clear(&http_parser);
         http_parser_init(&http_parser);
       }
diff --git a/proxy/http2/Makefile.am b/proxy/http2/Makefile.am
index 7f77e90..100909b 100644
--- a/proxy/http2/Makefile.am
+++ b/proxy/http2/Makefile.am
@@ -84,6 +84,7 @@ test_libhttp2_CPPFLAGS = $(AM_CPPFLAGS)\
 	-I$(abs_top_srcdir)/tests/include
 
 test_libhttp2_SOURCES = \
+	unit_tests/test_HTTP2.cc \
 	unit_tests/test_Http2Frame.cc \
 	unit_tests/main.cc
 
diff --git a/proxy/http2/unit_tests/test_HTTP2.cc b/proxy/http2/unit_tests/test_HTTP2.cc
new file mode 100644
index 0000000..e67f50e
--- /dev/null
+++ b/proxy/http2/unit_tests/test_HTTP2.cc
@@ -0,0 +1,169 @@
+/** @file
+
+    Unit tests for HTTP2
+
+    @section license License
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
+
+#include "catch.hpp"
+
+#include "HTTP2.h"
+
+#include "tscpp/util/PostScript.h"
+
+TEST_CASE("Convert HTTPHdr", "[HTTP2]")
+{
+  url_init();
+  mime_init();
+  http_init();
+  http2_init();
+
+  HTTPParser parser;
+  ts::PostScript parser_defer([&]() -> void { http_parser_clear(&parser); });
+  http_parser_init(&parser);
+
+  SECTION("request")
+  {
+    const char request[] = "GET /index.html HTTP/1.1\r\n"
+                           "Host: trafficserver.apache.org\r\n"
+                           "User-Agent: foobar\r\n"
+                           "\r\n";
+
+    HTTPHdr hdr_1;
+    ts::PostScript hdr_1_defer([&]() -> void { hdr_1.destroy(); });
+    hdr_1.create(HTTP_TYPE_REQUEST);
+    http2_init_pseudo_headers(hdr_1);
+
+    // parse
+    const char *start = request;
+    const char *end   = request + sizeof(request) - 1;
+    hdr_1.parse_req(&parser, &start, end, true);
+
+    // convert to HTTP/2
+    http2_convert_header_from_1_1_to_2(&hdr_1);
+
+    // check pseudo headers
+    // :method
+    {
+      MIMEField *f = hdr_1.field_find(HTTP2_VALUE_METHOD, HTTP2_LEN_METHOD);
+      REQUIRE(f != nullptr);
+      std::string_view v = f->value_get();
+      CHECK(v.compare("GET") == 0);
+    }
+
+    // :scheme
+    {
+      MIMEField *f = hdr_1.field_find(HTTP2_VALUE_SCHEME, HTTP2_LEN_SCHEME);
+      REQUIRE(f != nullptr);
+      std::string_view v = f->value_get();
+      CHECK(v.compare("https") == 0);
+    }
+
+    // :authority
+    {
+      MIMEField *f = hdr_1.field_find(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY);
+      REQUIRE(f != nullptr);
+      std::string_view v = f->value_get();
+      CHECK(v.compare("trafficserver.apache.org") == 0);
+    }
+
+    // :path
+    {
+      MIMEField *f = hdr_1.field_find(HTTP2_VALUE_PATH, HTTP2_LEN_PATH);
+      REQUIRE(f != nullptr);
+      std::string_view v = f->value_get();
+      CHECK(v.compare("/index.html") == 0);
+    }
+
+    // convert to HTTP/1.1
+    HTTPHdr hdr_2;
+    ts::PostScript hdr_2_defer([&]() -> void { hdr_2.destroy(); });
+    hdr_2.create(HTTP_TYPE_REQUEST);
+    hdr_2.copy(&hdr_1);
+
+    http2_convert_header_from_2_to_1_1(&hdr_2);
+
+    // dump
+    char buf[128]  = {0};
+    int bufindex   = 0;
+    int dumpoffset = 0;
+
+    hdr_2.print(buf, sizeof(buf), &bufindex, &dumpoffset);
+
+    // check
+    CHECK_THAT(buf, Catch::StartsWith("GET https://trafficserver.apache.org/index.html HTTP/1.1\r\n"
+                                      "Host: trafficserver.apache.org\r\n"
+                                      "User-Agent: foobar\r\n"
+                                      "\r\n"));
+  }
+
+  SECTION("response")
+  {
+    const char response[] = "HTTP/1.1 200 OK\r\n"
+                            "Connection: close\r\n"
+                            "\r\n";
+
+    HTTPHdr hdr_1;
+    ts::PostScript hdr_1_defer([&]() -> void { hdr_1.destroy(); });
+    hdr_1.create(HTTP_TYPE_RESPONSE);
+    http2_init_pseudo_headers(hdr_1);
+
+    // parse
+    const char *start = response;
+    const char *end   = response + sizeof(response) - 1;
+    hdr_1.parse_resp(&parser, &start, end, true);
+
+    // convert to HTTP/2
+    http2_convert_header_from_1_1_to_2(&hdr_1);
+
+    // check pseudo headers
+    // :status
+    {
+      MIMEField *f = hdr_1.field_find(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS);
+      REQUIRE(f != nullptr);
+      std::string_view v = f->value_get();
+      CHECK(v.compare("200") == 0);
+    }
+
+    // no connection header
+    {
+      MIMEField *f = hdr_1.field_find(MIME_FIELD_CONNECTION, MIME_LEN_CONNECTION);
+      CHECK(f == nullptr);
+    }
+
+    // convert to HTTP/1.1
+    HTTPHdr hdr_2;
+    ts::PostScript hdr_2_defer([&]() -> void { hdr_2.destroy(); });
+    hdr_2.create(HTTP_TYPE_REQUEST);
+    hdr_2.copy(&hdr_1);
+
+    http2_convert_header_from_2_to_1_1(&hdr_2);
+
+    // dump
+    char buf[128]  = {0};
+    int bufindex   = 0;
+    int dumpoffset = 0;
+
+    hdr_2.print(buf, sizeof(buf), &bufindex, &dumpoffset);
+
+    // check
+    REQUIRE(bufindex > 0);
+    CHECK_THAT(buf, Catch::StartsWith("HTTP/1.1 200 OK\r\n\r\n"));
+  }
+}
diff --git a/proxy/http3/Http3HeaderFramer.cc b/proxy/http3/Http3HeaderFramer.cc
index b3ac791..b69f298 100644
--- a/proxy/http3/Http3HeaderFramer.cc
+++ b/proxy/http3/Http3HeaderFramer.cc
@@ -71,9 +71,9 @@ Http3HeaderFramer::is_done() const
 }
 
 void
-Http3HeaderFramer::_convert_header_from_1_1_to_3(HTTPHdr *h3_hdrs, HTTPHdr *h1_hdrs)
+Http3HeaderFramer::_convert_header_from_1_1_to_3(HTTPHdr *hdrs)
 {
-  http2_generate_h2_header_from_1_1(h1_hdrs, h3_hdrs);
+  http2_convert_header_from_1_1_to_2(hdrs);
 }
 
 void
@@ -85,22 +85,23 @@ Http3HeaderFramer::_generate_header_block()
 
   if (this->_transaction->direction() == NET_VCONNECTION_OUT) {
     this->_header.create(HTTP_TYPE_REQUEST);
+    http2_init_pseudo_headers(this->_header);
     parse_result = this->_header.parse_req(&this->_http_parser, this->_source_vio->get_reader(), &bytes_used, false);
   } else {
     this->_header.create(HTTP_TYPE_RESPONSE);
+    http2_init_pseudo_headers(this->_header);
     parse_result = this->_header.parse_resp(&this->_http_parser, this->_source_vio->get_reader(), &bytes_used, false);
   }
   this->_source_vio->ndone += bytes_used;
 
   switch (parse_result) {
   case PARSE_RESULT_DONE: {
-    HTTPHdr h3_hdr;
-    this->_convert_header_from_1_1_to_3(&h3_hdr, &this->_header);
+    this->_convert_header_from_1_1_to_3(&this->_header);
 
     this->_header_block        = new_MIOBuffer(BUFFER_SIZE_INDEX_32K);
     this->_header_block_reader = this->_header_block->alloc_reader();
 
-    this->_qpack->encode(this->_stream_id, h3_hdr, this->_header_block, this->_header_block_len);
+    this->_qpack->encode(this->_stream_id, this->_header, this->_header_block, this->_header_block_len);
     break;
   }
   case PARSE_RESULT_CONT:
diff --git a/proxy/http3/Http3HeaderFramer.h b/proxy/http3/Http3HeaderFramer.h
index 55ce586..2e70338 100644
--- a/proxy/http3/Http3HeaderFramer.h
+++ b/proxy/http3/Http3HeaderFramer.h
@@ -55,6 +55,6 @@ private:
   HTTPParser _http_parser;
   HTTPHdr _header;
 
-  void _convert_header_from_1_1_to_3(HTTPHdr *h3_hdrs, HTTPHdr *h1_hdrs);
+  void _convert_header_from_1_1_to_3(HTTPHdr *hdrs);
   void _generate_header_block();
 };
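
The unit test added above exercises the new flow end to end; in outline
(adapted from that test, with the parsing step elided), the conversion now
happens in place instead of building a second HTTPHdr on a fresh HdrHeap:

    HTTPHdr hdr;
    hdr.create(HTTP_TYPE_REQUEST);
    http2_init_pseudo_headers(hdr);   // reserve :method/:scheme/:authority/:path up front

    // ... parse_req() fills in the HTTP/1.1 request headers ...

    http2_convert_header_from_1_1_to_2(&hdr); // set the reserved pseudo-headers and drop
                                              // connection-specific fields, all in place

    // hdr can be passed straight to http2_encode_header_blocks(); there is no
    // temporary h2_hdr to create and destroy as with the old
    // http2_generate_h2_header_from_1_1().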


[trafficserver] 03/05: URL::parse fixes for empty paths (#7119)

Posted by zw...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit 998d61e91c774d3805e2658c3258d7fdd7a81843
Author: Brian Neradt <br...@gmail.com>
AuthorDate: Tue Sep 1 10:32:58 2020 -0500

    URL::parse fixes for empty paths (#7119)
    
    URL::parse could not handle URLs with empty paths, such as the
    following:
    
    http://www.example.com?name=something
    
    Note the lack of '/' after the hostname.
    
    This updates the parsing and printing logic to be able to handle this
    while being careful not to break current parse expectations.
    
    (cherry picked from commit 4e375331c9bc0e9e3955fd15f11b8c352ab2afb0)
---
 .../api/functions/TSUrlHostGet.en.rst              |   2 +-
 include/ts/ts.h                                    |   4 +-
 plugins/header_rewrite/operators.cc                |   4 +-
 proxy/hdrs/URL.cc                                  | 108 ++++--
 proxy/hdrs/URL.h                                   | 102 +++++-
 proxy/hdrs/unit_tests/test_URL.cc                  | 373 +++++++++++++++++++++
 proxy/http/remap/RemapConfig.cc                    |  10 +-
 proxy/http2/unit_tests/test_HTTP2.cc               |  10 +-
 tests/gold_tests/autest-site/microDNS.test.ext     |   2 +-
 .../header_rewrite/gold/set-redirect.gold          |   8 +
 ...e_client.test.py => header_rewrite_url.test.py} |  42 ++-
 .../header_rewrite/rules/set_redirect.conf         |  18 +
 tests/gold_tests/remap/gold/remap-zero-200.gold    |   7 +
 tests/gold_tests/remap/regex_map.test.py           |  62 ++++
 14 files changed, 690 insertions(+), 62 deletions(-)

diff --git a/doc/developer-guide/api/functions/TSUrlHostGet.en.rst b/doc/developer-guide/api/functions/TSUrlHostGet.en.rst
index 3db194c..bb07176 100644
--- a/doc/developer-guide/api/functions/TSUrlHostGet.en.rst
+++ b/doc/developer-guide/api/functions/TSUrlHostGet.en.rst
@@ -51,7 +51,7 @@ and retrieve or modify parts of URLs, such as their host, port or scheme
 information.
 
 :func:`TSUrlSchemeGet`, :func:`TSUrlUserGet`, :func:`TSUrlPasswordGet`,
-:func:`TSUrlHostGet`, :func:`TSUrlHttpParamsGet`, :func:`TSUrlHttpQueryGet`
+:func:`TSUrlHostGet`, :func:`TSUrlPathGet`, :func:`TSUrlHttpParamsGet`, :func:`TSUrlHttpQueryGet`
 and :func:`TSUrlHttpFragmentGet` each retrieve an internal pointer to the
 specified portion of the URL from the marshall buffer :arg:`bufp`. The length
 of the returned string is placed in :arg:`length` and a pointer to the URL
diff --git a/include/ts/ts.h b/include/ts/ts.h
index eb18a08..a44827c 100644
--- a/include/ts/ts.h
+++ b/include/ts/ts.h
@@ -398,8 +398,8 @@ tsapi int TSUrlLengthGet(TSMBuffer bufp, TSMLoc offset);
     string in the parameter length. This is the same length that
     TSUrlLengthGet() returns. The returned string is allocated by a
     call to TSmalloc(). It should be freed by a call to TSfree().
-    The length parameter must present, providing storage for the URL
-    string length value.
+    The length parameter must be present, providing storage for the
+    URL string length value.
     Note: To get the effective URL from a request, use the alternative
           TSHttpTxnEffectiveUrlStringGet or
           TSHttpHdrEffectiveUrlBufGet APIs.
diff --git a/plugins/header_rewrite/operators.cc b/plugins/header_rewrite/operators.cc
index 4c58f2f..f0cd8b4 100644
--- a/plugins/header_rewrite/operators.cc
+++ b/plugins/header_rewrite/operators.cc
@@ -406,7 +406,9 @@ OperatorSetRedirect::exec(const Resources &res) const
     const char *end   = value.size() + start;
     if (remap) {
       // Set new location.
-      TSUrlParse(bufp, url_loc, &start, end);
+      if (TS_PARSE_ERROR == TSUrlParse(bufp, url_loc, &start, end)) {
+        TSDebug(PLUGIN_NAME, "Could not set Location field value to: %s", value.c_str());
+      }
       // Set the new status.
       TSHttpTxnStatusSet(res.txnp, static_cast<TSHttpStatus>(_status.get_int_value()));
       const_cast<Resources &>(res).changed_url = true;
diff --git a/proxy/hdrs/URL.cc b/proxy/hdrs/URL.cc
index c528ee8..c2a903b 100644
--- a/proxy/hdrs/URL.cc
+++ b/proxy/hdrs/URL.cc
@@ -290,10 +290,11 @@ url_copy_onto_as_server_url(URLImpl *s_url, HdrHeap *s_heap, URLImpl *d_url, Hdr
 {
   url_nuke_proxy_stuff(d_url);
 
-  d_url->m_ptr_path     = s_url->m_ptr_path;
-  d_url->m_ptr_params   = s_url->m_ptr_params;
-  d_url->m_ptr_query    = s_url->m_ptr_query;
-  d_url->m_ptr_fragment = s_url->m_ptr_fragment;
+  d_url->m_ptr_path      = s_url->m_ptr_path;
+  d_url->m_path_is_empty = s_url->m_path_is_empty;
+  d_url->m_ptr_params    = s_url->m_ptr_params;
+  d_url->m_ptr_query     = s_url->m_ptr_query;
+  d_url->m_ptr_fragment  = s_url->m_ptr_fragment;
   url_clear_string_ref(d_url);
 
   d_url->m_len_path     = s_url->m_len_path;
@@ -827,9 +828,13 @@ url_length_get(URLImpl *url)
   }
 
   if (url->m_ptr_path) {
-    length += url->m_len_path + 1; // +1 for /
-  } else {
-    length += 1; // +1 for /
+    length += url->m_len_path;
+  }
+
+  if (!url->m_path_is_empty) {
+    // m_ptr_path does not contain the initial "/" and thus m_len_path does not
+    // count it. We account for it here.
+    length += 1; // +1 for "/"
   }
 
   if (url->m_ptr_params && url->m_len_params > 0) {
@@ -1185,26 +1190,30 @@ url_is_strictly_compliant(const char *start, const char *end)
 using namespace UrlImpl;
 
 ParseResult
-url_parse(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings_p, bool strict_uri_parsing)
+url_parse(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings_p, bool strict_uri_parsing,
+          bool verify_host_characters)
 {
   if (strict_uri_parsing && !url_is_strictly_compliant(*start, end)) {
     return PARSE_RESULT_ERROR;
   }
 
   ParseResult zret = url_parse_scheme(heap, url, start, end, copy_strings_p);
-  return PARSE_RESULT_CONT == zret ? url_parse_http(heap, url, start, end, copy_strings_p) : zret;
+  return PARSE_RESULT_CONT == zret ? url_parse_http(heap, url, start, end, copy_strings_p, verify_host_characters) : zret;
 }
 
 ParseResult
-url_parse_no_path_component_breakdown(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings_p)
+url_parse_regex(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings_p)
 {
   ParseResult zret = url_parse_scheme(heap, url, start, end, copy_strings_p);
-  return PARSE_RESULT_CONT == zret ? url_parse_http_no_path_component_breakdown(heap, url, start, end, copy_strings_p) : zret;
+  return PARSE_RESULT_CONT == zret ? url_parse_http_regex(heap, url, start, end, copy_strings_p) : zret;
 }
 
 /**
   Parse internet URL.
 
+  After this function completes, start will point to the first character after the
+  host or @a end if there are not characters after it.
+
   @verbatim
   [://][user[:password]@]host[:port]
 
@@ -1219,7 +1228,8 @@ url_parse_no_path_component_breakdown(HdrHeap *heap, URLImpl *url, const char **
 */
 
 ParseResult
-url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *end, bool copy_strings_p)
+url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *end, bool copy_strings_p,
+                   bool verify_host_characters)
 {
   const char *cur = *start;
   const char *base;              // Base for host/port field.
@@ -1296,8 +1306,14 @@ url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *
       bracket = cur; // location and flag.
       ++cur;
       break;
-    case '/':    // we're done with this phase.
-      end = cur; // cause loop exit
+    // RFC 3986, section 3.2:
+    // The authority component is ...  terminated by the next slash ("/"),
+    // question mark ("?"), or number sign ("#") character, or by the end of
+    // the URI.
+    case '/':
+    case '?':
+    case '#':
+      end = cur; // We're done parsing authority, cause loop exit.
       break;
     default:
       ++cur;
@@ -1324,7 +1340,7 @@ url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *
     }
   }
   if (host._size) {
-    if (validate_host_name(std::string_view(host._ptr, host._size))) {
+    if (!verify_host_characters || validate_host_name(std::string_view(host._ptr, host._size))) {
       url_host_set(heap, url, host._ptr, host._size, copy_strings_p);
     } else {
       return PARSE_RESULT_ERROR;
@@ -1339,9 +1355,6 @@ url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *
     }
     url_port_set(heap, url, port._ptr, port._size, copy_strings_p);
   }
-  if ('/' == *cur) {
-    ++cur; // must do this after filling in host/port.
-  }
   *start = cur;
   return PARSE_RESULT_DONE;
 }
@@ -1352,7 +1365,7 @@ url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, char const *
 // empties params/query/fragment component
 
 ParseResult
-url_parse_http(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings)
+url_parse_http(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings, bool verify_host_characters)
 {
   ParseResult err;
   const char *cur;
@@ -1366,18 +1379,28 @@ url_parse_http(HdrHeap *heap, URLImpl *url, const char **start, const char *end,
   const char *fragment_end   = nullptr;
   char mask;
 
-  err = url_parse_internet(heap, url, start, end, copy_strings);
+  err = url_parse_internet(heap, url, start, end, copy_strings, verify_host_characters);
   if (err < 0) {
     return err;
   }
 
   cur = *start;
+  if (url->m_ptr_host == nullptr && ((end - cur) >= 2) && '/' == *cur && '/' == *(cur + 1)) {
+    // RFC 3986 section-3.3:
+    // If a URI does not contain an authority component, then the path cannot
+    // begin with two slash characters ("//").
+    return PARSE_RESULT_ERROR;
+  }
+  bool nothing_after_host = false;
   if (*start == end) {
+    nothing_after_host = true;
     goto done;
   }
 
-  path_start = cur;
-  mask       = ';' & '?' & '#';
+  if (*cur == '/') {
+    path_start = cur;
+  }
+  mask = ';' & '?' & '#';
 parse_path2:
   if ((*cur & mask) == mask) {
     if (*cur == ';') {
@@ -1431,10 +1454,42 @@ parse_fragment1:
 
 done:
   if (path_start) {
+    // There was an explicit path set with '/'.
     if (!path_end) {
       path_end = cur;
     }
+    if (path_start == path_end) {
+      url->m_path_is_empty = true;
+    } else {
+      url->m_path_is_empty = false;
+      // Per RFC 3986 section 3, the query string does not contain the initial
+      // '?' nor does the fragment contain the initial '#'. The path however
+      // does contain the initial '/' and a path can be empty, containing no
+      // characters at all, not even the initial '/'. Our path_get interface,
+      // however, has long not behaved accordingly, returning only the
+      // characters after the first '/'. This does not allow users to tell
+      // whether the path was absolutely empty. Further, callers have to
+      // account for the missing first '/' character themselves, either in URL
+      // length calculations or when piecing together their own URL. There are
+      // various examples of this in core and in the plugins shipped with Traffic
+      // Server.
+      //
+      // Correcting this behavior by having path_get return the entire path,
+      // (inclusive of any first '/') and an empty string if there were no
+      // characters specified in the path would break existing functionality,
+      // including various plugins that expect this behavior. Rather than
+      // correcting this behavior, therefore, we maintain the current
+      // functionality but add state to determine whether the path was
+      // absolutely empty so we can reconstruct such URLs.
+      ++path_start;
+    }
     url_path_set(heap, url, path_start, path_end - path_start, copy_strings);
+  } else if (!nothing_after_host) {
+    // There was no path set via '/': it is absolutely empty. However, if there
+    // is no path, query, or fragment after the host, we by convention add a
+    // slash after the authority.  Users of URL expect this behavior. Thus the
+    // nothing_after_host check.
+    url->m_path_is_empty = true;
   }
   if (params_start) {
     if (!params_end) {
@@ -1443,12 +1498,14 @@ done:
     url_params_set(heap, url, params_start, params_end - params_start, copy_strings);
   }
   if (query_start) {
+    // There was a query string marked by '?'.
     if (!query_end) {
       query_end = cur;
     }
     url_query_set(heap, url, query_start, query_end - query_start, copy_strings);
   }
   if (fragment_start) {
+    // There was a fragment string marked by '#'.
     if (!fragment_end) {
       fragment_end = cur;
     }
@@ -1460,7 +1517,7 @@ done:
 }
 
 ParseResult
-url_parse_http_no_path_component_breakdown(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings)
+url_parse_http_regex(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings)
 {
   const char *cur = *start;
   const char *host_end;
@@ -1572,8 +1629,9 @@ url_print(URLImpl *url, char *buf_start, int buf_length, int *buf_index_inout, i
     }
   }
 
-  TRY(mime_mem_print("/", 1, buf_start, buf_length, buf_index_inout, buf_chars_to_skip_inout));
-
+  if (!url->m_path_is_empty) {
+    TRY(mime_mem_print("/", 1, buf_start, buf_length, buf_index_inout, buf_chars_to_skip_inout));
+  }
   if (url->m_ptr_path) {
     TRY(mime_mem_print(url->m_ptr_path, url->m_len_path, buf_start, buf_length, buf_index_inout, buf_chars_to_skip_inout));
   }
diff --git a/proxy/hdrs/URL.h b/proxy/hdrs/URL.h
index 025d2db..87c9207 100644
--- a/proxy/hdrs/URL.h
+++ b/proxy/hdrs/URL.h
@@ -74,7 +74,9 @@ struct URLImpl : public HdrHeapObjImpl {
   // 6 bytes
 
   uint32_t m_clean : 1;
-  // 8 bytes + 1 bit, will result in padding
+  /// Whether the URI had an absolutely empty path, not even an initial '/'.
+  uint32_t m_path_is_empty : 1;
+  // 8 bytes + 2 bits, will result in padding
 
   // Marshaling Functions
   int marshal(MarshalXlate *str_xlate, int num_xlate);
@@ -194,14 +196,19 @@ void url_params_set(HdrHeap *heap, URLImpl *url, const char *value, int length,
 void url_query_set(HdrHeap *heap, URLImpl *url, const char *value, int length, bool copy_string);
 void url_fragment_set(HdrHeap *heap, URLImpl *url, const char *value, int length, bool copy_string);
 
+constexpr bool USE_STRICT_URI_PARSING = true;
+
 ParseResult url_parse(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings,
-                      bool strict_uri_parsing = false);
-ParseResult url_parse_no_path_component_breakdown(HdrHeap *heap, URLImpl *url, const char **start, const char *end,
-                                                  bool copy_strings);
-ParseResult url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings);
-ParseResult url_parse_http(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings);
-ParseResult url_parse_http_no_path_component_breakdown(HdrHeap *heap, URLImpl *url, const char **start, const char *end,
-                                                       bool copy_strings);
+                      bool strict_uri_parsing = false, bool verify_host_characters = true);
+
+constexpr bool COPY_STRINGS = true;
+
+ParseResult url_parse_regex(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings);
+ParseResult url_parse_internet(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings,
+                               bool verify_host_characters);
+ParseResult url_parse_http(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings,
+                           bool verify_host_characters);
+ParseResult url_parse_http_regex(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings);
 
 char *url_unescapify(Arena *arena, const char *str, int length);
 
@@ -276,15 +283,48 @@ public:
   const char *fragment_get(int *length);
   void fragment_set(const char *value, int length);
 
+  /**
+   * Parse the given URL string and populate URL state with the parts.
+   *
+   * @param[in] url The URL to parse.
+   *
+   * @return PARSE_RESULT_DONE if parsing was successful, PARSE_RESULT_ERROR
+   * otherwise.
+   */
+  ParseResult parse(std::string_view url);
+
+  /** Same as parse() but do not verify that the host has proper FQDN
+   * characters.
+   *
+   * This is useful for RemapConfig To targets which have "$[0-9]" references
+   * in their host names which will later be substituted for other text.
+   */
+  ParseResult parse_no_host_check(std::string_view url);
+
   ParseResult parse(const char **start, const char *end);
   ParseResult parse(const char *str, int length);
-  ParseResult parse_no_path_component_breakdown(const char *str, int length);
+
+  /** Perform more simplified parsing that is resilient to receiving regular
+   * expressions.
+   *
+   * This simply looks for the first '/' in a URL and considers that the end of
+   * the authority and the beginning of the rest of the URL. This allows a '?'
+   * in the authority to be part of a regex rather than being treated as the
+   * start of a query string, which would otherwise confuse the parser.
+   *
+   * This is only used in RemapConfig and may have no other uses.
+   */
+  ParseResult parse_regex(std::string_view url);
+  ParseResult parse_regex(const char *str, int length);
 
 public:
   static char *unescapify(Arena *arena, const char *str, int length);
   // No gratuitous copies!
   URL(const URL &u) = delete;
   URL &operator=(const URL &u) = delete;
+
+private:
+  static constexpr bool VERIFY_HOST_CHARACTERS = true;
 };
 
 /*-------------------------------------------------------------------------
@@ -687,10 +727,35 @@ URL::fragment_set(const char *value, int length)
 
  */
 inline ParseResult
+URL::parse(std::string_view url)
+{
+  return this->parse(url.data(), static_cast<int>(url.size()));
+}
+
+/**
+  Parser doesn't clear URL first, so if you parse over a non-clear URL,
+  the resulting URL may contain some of the previous data.
+
+ */
+inline ParseResult
+URL::parse_no_host_check(std::string_view url)
+{
+  ink_assert(valid());
+  const char *start = url.data();
+  const char *end   = url.data() + url.length();
+  return url_parse(m_heap, m_url_impl, &start, end, COPY_STRINGS, !USE_STRICT_URI_PARSING, !VERIFY_HOST_CHARACTERS);
+}
+
+/**
+  Parser doesn't clear URL first, so if you parse over a non-clear URL,
+  the resulting URL may contain some of the previous data.
+
+ */
+inline ParseResult
 URL::parse(const char **start, const char *end)
 {
   ink_assert(valid());
-  return url_parse(m_heap, m_url_impl, start, end, true);
+  return url_parse(m_heap, m_url_impl, start, end, COPY_STRINGS);
 }
 
 /**
@@ -713,13 +778,26 @@ URL::parse(const char *str, int length)
 
  */
 inline ParseResult
-URL::parse_no_path_component_breakdown(const char *str, int length)
+URL::parse_regex(std::string_view url)
+{
+  ink_assert(valid());
+  const char *str = url.data();
+  return url_parse_regex(m_heap, m_url_impl, &str, str + url.length(), COPY_STRINGS);
+}
+
+/**
+  Parser doesn't clear URL first, so if you parse over a non-clear URL,
+  the resulting URL may contain some of the previous data.
+
+ */
+inline ParseResult
+URL::parse_regex(const char *str, int length)
 {
   ink_assert(valid());
   if (length < 0)
     length = (int)strlen(str);
   ink_assert(valid());
-  return url_parse_no_path_component_breakdown(m_heap, m_url_impl, &str, str + length, true);
+  return url_parse_regex(m_heap, m_url_impl, &str, str + length, COPY_STRINGS);
 }
 
 /*-------------------------------------------------------------------------
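
To make the intended division of labor between the new entry points concrete, the following is
a minimal sketch using the regex_map examples from the unit tests added in this patch. The
wrapper function and its name are hypothetical; the expected ParseResult values come straight
from url_parse_test_cases in proxy/hdrs/unit_tests/test_URL.cc.

    #include <string_view>
    #include "URL.h"

    // Hypothetical helper: parse one string with the given URL member function,
    // using a fresh heap per call just as the unit tests do.
    static ParseResult
    parse_with(std::string_view text, ParseResult (URL::*parser)(std::string_view))
    {
      HdrHeap *heap = new_HdrHeap();
      URL url;
      url.create(heap);
      ParseResult result = (url.*parser)(text);
      heap->destroy();
      return result;
    }

    void
    remap_style_parsing_sketch()
    {
      // A regex_map "From" URL is a regular expression, not a well-formed URI.
      std::string_view from{R"(http://(.*)?reactivate\.mail\.yahoo\.com/)"};
      ink_assert(parse_with(from, &URL::parse) == PARSE_RESULT_ERROR);      // strict parse rejects it
      ink_assert(parse_with(from, &URL::parse_regex) == PARSE_RESULT_DONE); // regex-tolerant parse accepts it

      // A regex_map "To" URL may carry a "$1" substitution in its host name.
      std::string_view to{"http://$1reactivate.real.mail.yahoo.com/"};
      ink_assert(parse_with(to, &URL::parse) == PARSE_RESULT_ERROR);              // '$' fails host verification
      ink_assert(parse_with(to, &URL::parse_no_host_check) == PARSE_RESULT_DONE); // host characters not checked
    }

RemapConfig.cc, further down in this patch, is the actual consumer of this split: parse_regex()
for the regex "From" side and parse_no_host_check() for the "To" side.
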
diff --git a/proxy/hdrs/unit_tests/test_URL.cc b/proxy/hdrs/unit_tests/test_URL.cc
index 9ebcd9d..975bd5e 100644
--- a/proxy/hdrs/unit_tests/test_URL.cc
+++ b/proxy/hdrs/unit_tests/test_URL.cc
@@ -93,3 +93,376 @@ TEST_CASE("ParseRulesStrictURI", "[proxy][parseuri]")
     }
   }
 }
+
+struct url_parse_test_case {
+  const std::string input_uri;
+  const std::string expected_printed_url;
+  const bool verify_host_characters;
+  const std::string expected_printed_url_regex;
+  const bool is_valid;
+  const bool is_valid_regex;
+};
+
+constexpr bool IS_VALID               = true;
+constexpr bool VERIFY_HOST_CHARACTERS = true;
+
+// clang-format off
+std::vector<url_parse_test_case> url_parse_test_cases = {
+  {
+    // The following scheme-only URI is technically valid per the spec, but we
+    // have historically returned this as invalid and I'm not comfortable
+    // changing it in case something depends upon this behavior. Besides, a
+    // scheme-only URI is probably not helpful to us, nor is it something
+    // Traffic Server is likely to see.
+    "http://",
+    "",
+    VERIFY_HOST_CHARACTERS,
+    "",
+    !IS_VALID,
+    !IS_VALID
+  },
+  {
+    "https:///",
+    "https:///",
+    VERIFY_HOST_CHARACTERS,
+    "https:///",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    // RFC 3986 section 3: when an authority is not present, the path cannot
+    // begin with two slash characters ("//"). parse_regex, though, is more
+    // forgiving.
+    "https:////",
+    "",
+    VERIFY_HOST_CHARACTERS,
+    "https:////",
+    !IS_VALID,
+    IS_VALID
+  },
+  {
+    // By convention, our url_print() function adds a path of '/' at the end of
+    // URLs that have no path, query, or fragment after the authority.
+    "mailto:Test.User@example.com",
+    "mailto:Test.User@example.com/",
+    VERIFY_HOST_CHARACTERS,
+    "mailto:Test.User@example.com/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "mailto:Test.User@example.com:25",
+    "mailto:Test.User@example.com:25/",
+    VERIFY_HOST_CHARACTERS,
+    "mailto:Test.User@example.com:25/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com",
+    "https://www.example.com/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/",
+    "https://www.example.com/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com//",
+    "https://www.example.com//",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com//",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://127.0.0.1",
+    "https://127.0.0.1/",
+    VERIFY_HOST_CHARACTERS,
+    "https://127.0.0.1/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://[::1]",
+    "https://[::1]/",
+    VERIFY_HOST_CHARACTERS,
+    "https://[::1]/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://127.0.0.1/",
+    "https://127.0.0.1/",
+    VERIFY_HOST_CHARACTERS,
+    "https://127.0.0.1/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com:8888",
+    "https://www.example.com:8888/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com:8888/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com:8888/",
+    "https://www.example.com:8888/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com:8888/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path",
+    "https://www.example.com/a/path",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com//a/path",
+    "https://www.example.com//a/path",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com//a/path",
+    IS_VALID,
+    IS_VALID
+  },
+
+  // Technically a trailing '?' with an empty query string is valid, but we
+  // drop the '?'. parse_regex, however, makes no distinction between query,
+  // fragment, and path components, so it leaves the trailing '?' in place.
+  {
+    "https://www.example.com/a/path?",
+    "https://www.example.com/a/path",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?",
+    IS_VALID,
+    IS_VALID},
+  {
+    "https://www.example.com/a/path?name=value",
+    "https://www.example.com/a/path?name=value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?name=value",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path?name=/a/path/value",
+    "https://www.example.com/a/path?name=/a/path/value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?name=/a/path/value",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value",
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value/",
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?name=/a/path/value;some=other_value/",
+    IS_VALID,
+    IS_VALID
+  },
+
+  // Again, URL::parse drops a final '?'.
+  {
+    "https://www.example.com?",
+    "https://www.example.com",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com?/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com?name=value",
+    "https://www.example.com?name=value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com?name=value/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com?name=value/",
+    "https://www.example.com?name=value/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com?name=value/",
+    IS_VALID,
+    IS_VALID
+  },
+
+  // URL::parse also drops the final '#'.
+  {
+    "https://www.example.com#",
+    "https://www.example.com",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com#/",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com#some=value",
+    "https://www.example.com#some=value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com#some=value/",
+    IS_VALID,
+    IS_VALID},
+  {
+    "https://www.example.com/a/path#",
+    "https://www.example.com/a/path",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path#",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path#some=value",
+    "https://www.example.com/a/path#some=value",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path#some=value",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    // Note that this final '?' is not for a query parameter but is a part of
+    // the fragment.
+    "https://www.example.com/a/path#some=value?",
+    "https://www.example.com/a/path#some=value?",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path#some=value?",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path#some=value?with_question",
+    "https://www.example.com/a/path#some=value?with_question",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path#some=value?with_question",
+    IS_VALID,
+    IS_VALID
+  },
+  {
+    "https://www.example.com/a/path?name=value?_with_question#some=value?with_question/",
+    "https://www.example.com/a/path?name=value?_with_question#some=value?with_question/",
+    VERIFY_HOST_CHARACTERS,
+    "https://www.example.com/a/path?name=value?_with_question#some=value?with_question/",
+    IS_VALID,
+    IS_VALID
+  },
+
+  // The following are some examples of strings we expect from regex_map in
+  // remap.config.  The "From" portions, which are regular expressions, are
+  // often not parsable by URL::parse but are by URL::parse_regex, which is
+  // the purpose of its existence.
+  {
+    R"(http://(.*)?reactivate\.mail\.yahoo\.com/)",
+    "",
+    VERIFY_HOST_CHARACTERS,
+    R"(http://(.*)?reactivate\.mail\.yahoo\.com/)",
+    !IS_VALID,
+    IS_VALID
+  },
+  {
+    // The following is an example of a "To" URL in a regex_map line. We'll
+    // first verify that the '$' is flagged as invalid for a host in this case.
+    "http://$1reactivate.real.mail.yahoo.com/",
+    "http://$1reactivate.real.mail.yahoo.com/",
+    VERIFY_HOST_CHARACTERS,
+    "http://$1reactivate.real.mail.yahoo.com/",
+    !IS_VALID,
+    IS_VALID
+  },
+  {
+    // Same as above, but this time we pass in !VERIFY_HOST_CHARACTERS. This is
+    // how RemapConfig will call this parse() function.
+    "http://$1reactivate.real.mail.yahoo.com/",
+    "http://$1reactivate.real.mail.yahoo.com/",
+    !VERIFY_HOST_CHARACTERS,
+    "http://$1reactivate.real.mail.yahoo.com/",
+    IS_VALID,
+    IS_VALID
+  }
+};
+// clang-format on
+
+constexpr bool URL_PARSE       = true;
+constexpr bool URL_PARSE_REGEX = false;
+
+/** Test the specified url.parse function.
+ *
+ * URL::parse and URL::parse_regex are exercised in the same way: this
+ * function runs the same set of checks for whichever of the two is selected.
+ *
+ * @param[in] test_case The test case specification to run.
+ *
+ * @param[in] parse_function Whether to run parse() or
+ * parse_regex().
+ */
+void
+test_parse(url_parse_test_case const &test_case, bool parse_function)
+{
+  URL url;
+  HdrHeap *heap = new_HdrHeap();
+  url.create(heap);
+  ParseResult result = PARSE_RESULT_OK;
+  if (parse_function == URL_PARSE) {
+    if (test_case.verify_host_characters) {
+      result = url.parse(test_case.input_uri);
+    } else {
+      result = url.parse_no_host_check(test_case.input_uri);
+    }
+  } else if (parse_function == URL_PARSE_REGEX) {
+    result = url.parse_regex(test_case.input_uri);
+  }
+  bool expected_is_valid = test_case.is_valid;
+  if (parse_function == URL_PARSE_REGEX) {
+    expected_is_valid = test_case.is_valid_regex;
+  }
+  if (expected_is_valid && result != PARSE_RESULT_DONE) {
+    std::printf("Parse URI: \"%s\", expected it to be valid but it was parsed invalid (%d)\n", test_case.input_uri.c_str(), result);
+    CHECK(false);
+  } else if (!expected_is_valid && result != PARSE_RESULT_ERROR) {
+    std::printf("Parse URI: \"%s\", expected it to be invalid but it was parsed valid (%d)\n", test_case.input_uri.c_str(), result);
+    CHECK(false);
+  }
+  if (result == PARSE_RESULT_DONE) {
+    char buf[1024];
+    int index  = 0;
+    int offset = 0;
+    url.print(buf, sizeof(buf), &index, &offset);
+    std::string printed_url{buf, static_cast<size_t>(index)};
+    if (parse_function == URL_PARSE) {
+      CHECK(test_case.expected_printed_url == printed_url);
+      CHECK(test_case.expected_printed_url.size() == printed_url.size());
+    } else if (parse_function == URL_PARSE_REGEX) {
+      CHECK(test_case.expected_printed_url_regex == printed_url);
+      CHECK(test_case.expected_printed_url_regex.size() == printed_url.size());
+    }
+  }
+  heap->destroy();
+}
+
+TEST_CASE("UrlParse", "[proxy][parseurl]")
+{
+  for (auto const &test_case : url_parse_test_cases) {
+    test_parse(test_case, URL_PARSE);
+    test_parse(test_case, URL_PARSE_REGEX);
+  }
+}
diff --git a/proxy/http/remap/RemapConfig.cc b/proxy/http/remap/RemapConfig.cc
index 24f9247..d8a2ce5 100644
--- a/proxy/http/remap/RemapConfig.cc
+++ b/proxy/http/remap/RemapConfig.cc
@@ -1086,12 +1086,13 @@ remap_parse_config_bti(const char *path, BUILD_TABLE_INFO *bti)
     }
 
     new_mapping->fromURL.create(nullptr);
-    rparse = new_mapping->fromURL.parse_no_path_component_breakdown(tmp, length);
+    rparse = new_mapping->fromURL.parse_regex(tmp, length);
 
     map_from_start[origLength] = '\0'; // Unwhack
 
     if (rparse != PARSE_RESULT_DONE) {
-      errStr = "malformed From URL";
+      snprintf(errStrBuf, sizeof(errStrBuf), "malformed From URL: %.*s", length, tmp);
+      errStr = errStrBuf;
       goto MAP_ERROR;
     }
 
@@ -1101,11 +1102,12 @@ remap_parse_config_bti(const char *path, BUILD_TABLE_INFO *bti)
     tmp          = map_to;
 
     new_mapping->toURL.create(nullptr);
-    rparse                   = new_mapping->toURL.parse_no_path_component_breakdown(tmp, length);
+    rparse                   = new_mapping->toURL.parse_no_host_check(std::string_view(tmp, length));
     map_to_start[origLength] = '\0'; // Unwhack
 
     if (rparse != PARSE_RESULT_DONE) {
-      errStr = "malformed To URL";
+      snprintf(errStrBuf, sizeof(errStrBuf), "malformed To URL: %.*s", length, tmp);
+      errStr = errStrBuf;
       goto MAP_ERROR;
     }
 
diff --git a/proxy/http2/unit_tests/test_HTTP2.cc b/proxy/http2/unit_tests/test_HTTP2.cc
index e67f50e..21a772a 100644
--- a/proxy/http2/unit_tests/test_HTTP2.cc
+++ b/proxy/http2/unit_tests/test_HTTP2.cc
@@ -64,7 +64,7 @@ TEST_CASE("Convert HTTPHdr", "[HTTP2]")
       MIMEField *f = hdr_1.field_find(HTTP2_VALUE_METHOD, HTTP2_LEN_METHOD);
       REQUIRE(f != nullptr);
       std::string_view v = f->value_get();
-      CHECK(v.compare("GET") == 0);
+      CHECK(v == "GET");
     }
 
     // :scheme
@@ -72,7 +72,7 @@ TEST_CASE("Convert HTTPHdr", "[HTTP2]")
       MIMEField *f = hdr_1.field_find(HTTP2_VALUE_SCHEME, HTTP2_LEN_SCHEME);
       REQUIRE(f != nullptr);
       std::string_view v = f->value_get();
-      CHECK(v.compare("https") == 0);
+      CHECK(v == "https");
     }
 
     // :authority
@@ -80,7 +80,7 @@ TEST_CASE("Convert HTTPHdr", "[HTTP2]")
       MIMEField *f = hdr_1.field_find(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY);
       REQUIRE(f != nullptr);
       std::string_view v = f->value_get();
-      CHECK(v.compare("trafficserver.apache.org") == 0);
+      CHECK(v == "trafficserver.apache.org");
     }
 
     // :path
@@ -88,7 +88,7 @@ TEST_CASE("Convert HTTPHdr", "[HTTP2]")
       MIMEField *f = hdr_1.field_find(HTTP2_VALUE_PATH, HTTP2_LEN_PATH);
       REQUIRE(f != nullptr);
       std::string_view v = f->value_get();
-      CHECK(v.compare("/index.html") == 0);
+      CHECK(v == "/index.html");
     }
 
     // convert to HTTP/1.1
@@ -138,7 +138,7 @@ TEST_CASE("Convert HTTPHdr", "[HTTP2]")
       MIMEField *f = hdr_1.field_find(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS);
       REQUIRE(f != nullptr);
       std::string_view v = f->value_get();
-      CHECK(v.compare("200") == 0);
+      CHECK(v == "200");
     }
 
     // no connection header
diff --git a/tests/gold_tests/autest-site/microDNS.test.ext b/tests/gold_tests/autest-site/microDNS.test.ext
index 83fbc7b..e508f5d 100644
--- a/tests/gold_tests/autest-site/microDNS.test.ext
+++ b/tests/gold_tests/autest-site/microDNS.test.ext
@@ -82,7 +82,7 @@ def MakeDNServer(obj, name, filename="dns_file.json", port=False, ip='INADDR_LOO
         jsondata = {'mappings': []}
 
         if default:
-            jsondata['otherwise'] = default
+            jsondata['otherwise'] = [default]
 
         with open(filepath, 'w') as f:
             f.write(json.dumps(jsondata))
diff --git a/tests/gold_tests/pluginTest/header_rewrite/gold/set-redirect.gold b/tests/gold_tests/pluginTest/header_rewrite/gold/set-redirect.gold
new file mode 100644
index 0000000..bd76250
--- /dev/null
+++ b/tests/gold_tests/pluginTest/header_rewrite/gold/set-redirect.gold
@@ -0,0 +1,8 @@
+``
+> HEAD / HTTP/1.1
+> Host: no_path.com
+``
+< HTTP/1.1 301 Redirect
+``
+< Location: http://no_path.com?name=brian/
+``
diff --git a/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_client.test.py b/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py
similarity index 58%
rename from tests/gold_tests/pluginTest/header_rewrite/header_rewrite_client.test.py
rename to tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py
index 0ab0f88..8f84271 100644
--- a/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_client.test.py
+++ b/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py
@@ -1,4 +1,5 @@
 '''
+Test header_rewrite with URL conditions and operators.
 '''
 #  Licensed to the Apache Software Foundation (ASF) under one
 #  or more contributor license agreements.  See the NOTICE file
@@ -17,44 +18,63 @@
 #  limitations under the License.
 
 Test.Summary = '''
-Test header_rewrite and CLIENT-URL
+Test header_rewrite with URL conditions and operators.
 '''
 
 Test.ContinueOnFail = True
-# Define default ATS
 ts = Test.MakeATSProcess("ts")
 server = Test.MakeOriginServer("server")
 
+# Configure the server to return 200 responses. The rewrite rules below set a
+# non-200 status, so if curl gets a 200 response something went wrong.
 Test.testName = ""
 request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
-# expected response from the origin server
 response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
-
-# add response to the server dictionary
 server.addResponse("sessionfile.log", request_header, response_header)
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: no_path.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionfile.log", request_header, response_header)
+
 ts.Disk.records_config.update({
     'proxy.config.diags.debug.enabled': 1,
     'proxy.config.diags.debug.tags': 'header.*',
 })
 # The following rule changes the status code returned from origin server to 303
 ts.Setup.CopyAs('rules/rule_client.conf', Test.RunDirectory)
+ts.Setup.CopyAs('rules/set_redirect.conf', Test.RunDirectory)
 
+# This configuration makes use of CLIENT-URL in conditions.
 ts.Disk.remap_config.AddLine(
-    'map http://www.example.com/from_path/ https://127.0.0.1:{0}/to_path/ @plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(
+    'map http://www.example.com/from_path/ https://127.0.0.1:{0}/to_path/ '
+    '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(
         server.Variables.Port, Test.RunDirectory))
 ts.Disk.remap_config.AddLine(
-    'map http://www.example.com:8080/from_path/ https://127.0.0.1:{0}/to_path/ @plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(
+    'map http://www.example.com:8080/from_path/ https://127.0.0.1:{0}/to_path/ '
+    '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(
         server.Variables.Port, Test.RunDirectory))
+# This configuration makes use of TO-URL in a set-redirect operator.
+ts.Disk.remap_config.AddLine(
+    'map http://no_path.com http://no_path.com?name=brian/ '
+    '@plugin=header_rewrite.so @pparam={0}/set_redirect.conf'.format(
+        Test.RunDirectory))
 
-# call localhost straight
+# Test CLIENT-URL.
 tr = Test.AddTestRun()
-tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com/from_path/hello?=foo=bar" -H "Proxy-Connection: keep-alive" --verbose'.format(
-    ts.Variables.port)
+tr.Processes.Default.Command = (
+    'curl --proxy 127.0.0.1:{0} "http://www.example.com/from_path/hello?=foo=bar" '
+    '-H "Proxy-Connection: keep-alive" --verbose'.format(
+        ts.Variables.port))
 tr.Processes.Default.ReturnCode = 0
-# time delay as proxy.config.http.wait_for_cache could be broken
 tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
 tr.Processes.Default.StartBefore(Test.Processes.ts)
 tr.Processes.Default.Streams.stderr = "gold/header_rewrite-client.gold"
 tr.StillRunningAfter = server
+ts.Streams.All = "gold/header_rewrite-tag.gold"
 
+# Test TO-URL in a set-redirect operator.
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --head 127.0.0.1:{0} -H "Host: no_path.com" --verbose'.format(
+    ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.Streams.stderr = "gold/set-redirect.gold"
+tr.StillRunningAfter = server
 ts.Streams.All = "gold/header_rewrite-tag.gold"
diff --git a/tests/gold_tests/pluginTest/header_rewrite/rules/set_redirect.conf b/tests/gold_tests/pluginTest/header_rewrite/rules/set_redirect.conf
new file mode 100644
index 0000000..6a3230a
--- /dev/null
+++ b/tests/gold_tests/pluginTest/header_rewrite/rules/set_redirect.conf
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set-redirect 301 %{TO-URL:URL}
diff --git a/tests/gold_tests/remap/gold/remap-zero-200.gold b/tests/gold_tests/remap/gold/remap-zero-200.gold
new file mode 100644
index 0000000..bcad791
--- /dev/null
+++ b/tests/gold_tests/remap/gold/remap-zero-200.gold
@@ -0,0 +1,7 @@
+``
+> GET / HTTP/1.1
+> Host: zero.one.two.three.com
+``
+< HTTP/1.1 200 OK
+< Date: ``
+``
diff --git a/tests/gold_tests/remap/regex_map.test.py b/tests/gold_tests/remap/regex_map.test.py
new file mode 100644
index 0000000..54b3e7e
--- /dev/null
+++ b/tests/gold_tests/remap/regex_map.test.py
@@ -0,0 +1,62 @@
+'''
+Verify correct behavior of regex_map in remap.config.
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+Test.Summary = '''
+Verify correct behavior of regex_map in remap.config.
+'''
+
+Test.ContinueOnFail = True
+ts = Test.MakeATSProcess("ts")
+server = Test.MakeOriginServer("server")
+dns = Test.MakeDNServer("dns", default='127.0.0.1')
+
+Test.testName = ""
+request_header = {"headers": "GET / HTTP/1.1\r\nHost: zero.one.two.three.com\r\n\r\n",
+                  "timestamp": "1469733493.993", "body": ""}
+response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
+                   "timestamp": "1469733493.993", "body": ""}
+server.addResponse("sessionfile.log", request_header, response_header)
+
+ts.Disk.records_config.update({
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap',
+    'proxy.config.http.referer_filter': 1,
+    'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port),
+    'proxy.config.dns.resolv_conf': 'NULL'
+})
+
+ts.Disk.remap_config.AddLine(
+    r'regex_map '
+    r'http://(.*)?one\.two\.three\.com/ '
+    r'http://$1reactivate.four.five.six.com:{}/'.format(server.Variables.Port)
+)
+ts.Disk.remap_config.AddLine(
+    r'regex_map '
+    r'https://\b(?!(.*one|two|three|four|five|six)).+\b\.seven\.eight\.nine\.com/blah12345.html '
+    r'https://www.example.com:{}/one/two/three/blah12345.html'.format(server.Variables.Port)
+)
+
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl -H"Host: zero.one.two.three.com" http://127.0.0.1:{0}/ --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.StartBefore(dns)
+tr.Processes.Default.StartBefore(Test.Processes.ts)
+tr.Processes.Default.Streams.stderr = "gold/remap-zero-200.gold"
+tr.StillRunningAfter = server


[trafficserver] 01/05: Convert Mime and URL unit tests in proxy/hdrs to Catch.

Posted by zw...@apache.org.

commit 40e9d411229ddab4860c9287cc48abee500dc414
Author: Walter Karas <wk...@verizonmedia.com>
AuthorDate: Wed Mar 18 13:01:13 2020 -0500

    Convert Mime and URL unit tests in proxy/hdrs to Catch.
    
    (cherry picked from commit 051661e1e9ff3a3d74250f31f0a72da414484b0e)
    
     Conflicts:
    	proxy/hdrs/test_mime.cc
---
 proxy/hdrs/Makefile.am             |  17 +--
 proxy/hdrs/URL.cc                  |  85 ++-----------
 proxy/hdrs/test_mime.cc            | 104 ----------------
 proxy/hdrs/unit_tests/test_URL.cc  |  95 ++++++++++++++
 proxy/hdrs/unit_tests/test_mime.cc | 249 +++++++++++++++++++++++++++++++++++++
 5 files changed, 356 insertions(+), 194 deletions(-)
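
The conversion follows one mechanical pattern throughout: each REGRESSION_TEST/TestBox body
becomes a Catch TEST_CASE, and each box.check() becomes a printf diagnostic followed by
CHECK(false) on failure. A minimal sketch of that pattern is below; some_condition() is a
hypothetical stand-in for whatever a real test verifies.

    // Before: TestBox-based regression test (the style removed by this commit).
    //
    //   REGRESSION_TEST(Example)(RegressionTest *t, int /* level */, int *pstatus)
    //   {
    //     TestBox box(t, pstatus);
    //     box = REGRESSION_TEST_PASSED;
    //     box.check(some_condition(), "some_condition failed");
    //   }
    //
    // After: Catch test case, as in the new unit_tests/ files below.
    #include <cstdio>

    #include "catch.hpp"

    bool some_condition(); // hypothetical check, for illustration only

    TEST_CASE("Example", "[proxy][example]")
    {
      if (!some_condition()) {
        std::printf("some_condition failed\n");
        CHECK(false);
      }
    }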

diff --git a/proxy/hdrs/Makefile.am b/proxy/hdrs/Makefile.am
index 950dd2b..7fcd248 100644
--- a/proxy/hdrs/Makefile.am
+++ b/proxy/hdrs/Makefile.am
@@ -60,7 +60,6 @@ load_http_hdr_LDADD = -L. -lhdrs \
 	$(top_builddir)/src/tscpp/util/libtscpputil.la
 
 check_PROGRAMS = \
-	test_mime \
 	test_proxy_hdrs \
 	test_hdr_heap \
 	test_Huffmancode \
@@ -68,25 +67,15 @@ check_PROGRAMS = \
 
 TESTS = $(check_PROGRAMS)
 
-test_mime_LDADD = -L. -lhdrs \
-	$(top_builddir)/src/tscore/libtscore.la \
-	$(top_builddir)/src/tscpp/util/libtscpputil.la \
-	$(top_builddir)/iocore/eventsystem/libinkevent.a \
-	$(top_builddir)/lib/records/librecords_p.a \
-	$(top_builddir)/mgmt/libmgmt_p.la \
-	$(top_builddir)/proxy/shared/libUglyLogStubs.a \
-	@HWLOC_LIBS@ \
-	@LIBCAP@
-
-test_mime_SOURCES = test_mime.cc
-
 test_proxy_hdrs_CPPFLAGS = $(AM_CPPFLAGS) \
 	-I$(abs_top_srcdir)/tests/include
 
 test_proxy_hdrs_SOURCES = \
 	unit_tests/unit_test_main.cc \
 	unit_tests/test_Hdrs.cc \
-	unit_tests/test_HdrUtils.cc
+	unit_tests/test_HdrUtils.cc \
+	unit_tests/test_URL.cc \
+	unit_tests/test_mime.cc
 
 test_proxy_hdrs_LDADD = \
 	$(top_builddir)/src/tscore/libtscore.la \
diff --git a/proxy/hdrs/URL.cc b/proxy/hdrs/URL.cc
index ad100a5..c528ee8 100644
--- a/proxy/hdrs/URL.cc
+++ b/proxy/hdrs/URL.cc
@@ -1160,11 +1160,16 @@ url_parse_scheme(HdrHeap *heap, URLImpl *url, const char **start, const char *en
   return PARSE_RESULT_ERROR; // no non-whitespace found
 }
 
+// This implementation namespace is necessary because this function is tested by a Catch unit test
+// in another source file.
+//
+namespace UrlImpl
+{
 /**
  *  This method will return TRUE if the uri is strictly compliant with
  *  RFC 3986 and it will return FALSE if not.
  */
-static bool
+bool
 url_is_strictly_compliant(const char *start, const char *end)
 {
   for (const char *i = start; i < end; ++i) {
@@ -1176,6 +1181,9 @@ url_is_strictly_compliant(const char *start, const char *end)
   return true;
 }
 
+} // namespace UrlImpl
+using namespace UrlImpl;
+
 ParseResult
 url_parse(HdrHeap *heap, URLImpl *url, const char **start, const char *end, bool copy_strings_p, bool strict_uri_parsing)
 {
@@ -1801,78 +1809,3 @@ url_host_CryptoHash_get(URLImpl *url, CryptoHash *hash)
   ctx.update(&port, sizeof(port));
   ctx.finalize(*hash);
 }
-
-/*-------------------------------------------------------------------------
- * Regression tests
-  -------------------------------------------------------------------------*/
-#if TS_HAS_TESTS
-#include "tscore/TestBox.h"
-
-const static struct {
-  const char *const text;
-  bool valid;
-} http_validate_hdr_field_test_case[] = {{"yahoo", true},
-                                         {"yahoo.com", true},
-                                         {"yahoo.wow.com", true},
-                                         {"yahoo.wow.much.amaze.com", true},
-                                         {"209.131.52.50", true},
-                                         {"192.168.0.1", true},
-                                         {"localhost", true},
-                                         {"3ffe:1900:4545:3:200:f8ff:fe21:67cf", true},
-                                         {"fe80:0:0:0:200:f8ff:fe21:67cf", true},
-                                         {"fe80::200:f8ff:fe21:67cf", true},
-                                         {"<svg onload=alert(1)>", false}, // Sample host header XSS attack
-                                         {"jlads;f8-9349*(D&F*D(234jD*(FSD*(VKLJ#(*$@()#$)))))", false},
-                                         {"\"\t\n", false},
-                                         {"!@#$%^ &*(*&^%$#@#$%^&*(*&^%$#))", false},
-                                         {":):(:O!!!!!!", false}};
-
-REGRESSION_TEST(VALIDATE_HDR_FIELD)(RegressionTest *t, int /* level ATS_UNUSED */, int *pstatus)
-{
-  TestBox box(t, pstatus);
-  box = REGRESSION_TEST_PASSED;
-
-  for (auto i : http_validate_hdr_field_test_case) {
-    const char *const txt = i.text;
-    box.check(validate_host_name({txt}) == i.valid, "Validation of FQDN (host) header: \"%s\", expected %s, but not", txt,
-              (i.valid ? "true" : "false"));
-  }
-}
-
-REGRESSION_TEST(ParseRules_strict_URI)(RegressionTest *t, int /* level ATS_UNUSED */, int *pstatus)
-{
-  const struct {
-    const char *const uri;
-    bool valid;
-  } http_strict_uri_parsing_test_case[] = {{"/home", true},
-                                           {"/path/data?key=value#id", true},
-                                           {"/ABCDEFGHIJKLMNOPQRSTUVWXYZ", true},
-                                           {"/abcdefghijklmnopqrstuvwxyz", true},
-                                           {"/0123456789", true},
-                                           {":/?#[]@", true},
-                                           {"!$&'()*+,;=", true},
-                                           {"-._~", true},
-                                           {"%", true},
-                                           {"\n", false},
-                                           {"\"", false},
-                                           {"<", false},
-                                           {">", false},
-                                           {"\\", false},
-                                           {"^", false},
-                                           {"`", false},
-                                           {"{", false},
-                                           {"|", false},
-                                           {"}", false},
-                                           {"é", false}};
-
-  TestBox box(t, pstatus);
-  box = REGRESSION_TEST_PASSED;
-
-  for (auto i : http_strict_uri_parsing_test_case) {
-    const char *const uri = i.uri;
-    box.check(url_is_strictly_compliant(uri, uri + strlen(uri)) == i.valid, "Strictly parse URI: \"%s\", expected %s, but not", uri,
-              (i.valid ? "true" : "false"));
-  }
-}
-
-#endif // TS_HAS_TESTS
diff --git a/proxy/hdrs/test_mime.cc b/proxy/hdrs/test_mime.cc
deleted file mode 100644
index eba287d..0000000
--- a/proxy/hdrs/test_mime.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-/** @file
-
-  A brief file description
-
-  @section license License
-
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
- */
-
-#include "tscore/TestBox.h"
-#include "I_EventSystem.h"
-#include "MIME.h"
-
-REGRESSION_TEST(MIME)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  TestBox box(t, pstatus);
-  box = REGRESSION_TEST_PASSED;
-
-  MIMEField *field;
-  MIMEHdr hdr;
-  hdr.create(NULL);
-
-  hdr.field_create("Test1", 5);
-  hdr.field_create("Test2", 5);
-  hdr.field_create("Test3", 5);
-  hdr.field_create("Test4", 5);
-  field = hdr.field_create("Test5", 5);
-
-  box.check(hdr.m_mime->m_first_fblock.contains(field), "The field block doesn't contain the field but it should");
-  box.check(!hdr.m_mime->m_first_fblock.contains(field + (1L << 32)), "The field block contains the field but it shouldn't");
-
-  int slot_num = mime_hdr_field_slotnum(hdr.m_mime, field);
-  box.check(slot_num == 4, "Slot number is %d but should be 4", slot_num);
-
-  slot_num = mime_hdr_field_slotnum(hdr.m_mime, field + (1L << 32));
-  box.check(slot_num == -1, "Slot number is %d but should be -1", slot_num);
-
-  hdr.destroy();
-}
-
-REGRESSION_TEST(MIME_PARSERS)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus)
-{
-  const char *end;
-  int value;
-  TestBox box(t, pstatus);
-  box = REGRESSION_TEST_PASSED;
-
-  std::vector<std::pair<const char *, int>> tests = {{"0", 0},
-                                                     {"1234", 1234},
-                                                     {"-1234", -1234},
-                                                     {"2147483647", 2147483647},
-                                                     {"-2147483648", 2147483648},
-                                                     {"2147483648", INT_MAX},
-                                                     {"-2147483649", INT_MIN},
-                                                     {"2147483647", INT_MAX},
-                                                     {"-2147483648", INT_MIN},
-                                                     {"999999999999", INT_MAX},
-                                                     {"-999999999999", INT_MIN}};
-
-  for (const auto &it : tests) {
-    auto [buf, val] = it;
-
-    end = buf + strlen(buf);
-    box.check(mime_parse_int(buf, end) == val, "Failed mime_parse_int");
-    box.check(mime_parse_integer(buf, end, &value), "Failed mime_parse_integer call");
-    box.check(value == val, "Failed mime_parse_integer value");
-  }
-
-  // Also check the date parser, which relies heavily on the mime_parse_integer() function
-  const char *date1 = "Sun, 05 Dec 1999 08:49:37 GMT";
-  const char *date2 = "Sunday, 05-Dec-1999 08:49:37 GMT";
-
-  int d1 = mime_parse_date(date1, date1 + strlen(date1));
-  int d2 = mime_parse_date(date2, date2 + strlen(date2));
-
-  box.check(d1 == d2, "Failed mime_parse_date");
-
-  printf("Date1: %d\n", d1);
-  printf("Date2: %d\n", d2);
-}
-
-int
-main(int argc, const char **argv)
-{
-  Thread *main_thread = new EThread();
-  main_thread->set_specific();
-  mime_init();
-
-  return RegressionTest::main(argc, argv, REGRESSION_TEST_QUICK);
-}
diff --git a/proxy/hdrs/unit_tests/test_URL.cc b/proxy/hdrs/unit_tests/test_URL.cc
new file mode 100644
index 0000000..9ebcd9d
--- /dev/null
+++ b/proxy/hdrs/unit_tests/test_URL.cc
@@ -0,0 +1,95 @@
+/** @file
+
+   Catch-based unit tests for URL
+
+   @section license License
+
+   Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements.
+   See the NOTICE file distributed with this work for additional information regarding copyright
+   ownership.  The ASF licenses this file to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance with the License.  You may obtain a
+   copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software distributed under the License
+   is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+   or implied. See the License for the specific language governing permissions and limitations under
+   the License.
+ */
+
+#include <cstdio>
+
+#include "catch.hpp"
+
+#include "URL.h"
+
+TEST_CASE("ValidateURL", "[proxy][validurl]")
+{
+  static const struct {
+    const char *const text;
+    bool valid;
+  } http_validate_hdr_field_test_case[] = {{"yahoo", true},
+                                           {"yahoo.com", true},
+                                           {"yahoo.wow.com", true},
+                                           {"yahoo.wow.much.amaze.com", true},
+                                           {"209.131.52.50", true},
+                                           {"192.168.0.1", true},
+                                           {"localhost", true},
+                                           {"3ffe:1900:4545:3:200:f8ff:fe21:67cf", true},
+                                           {"fe80:0:0:0:200:f8ff:fe21:67cf", true},
+                                           {"fe80::200:f8ff:fe21:67cf", true},
+                                           {"<svg onload=alert(1)>", false}, // Sample host header XSS attack
+                                           {"jlads;f8-9349*(D&F*D(234jD*(FSD*(VKLJ#(*$@()#$)))))", false},
+                                           {"\"\t\n", false},
+                                           {"!@#$%^ &*(*&^%$#@#$%^&*(*&^%$#))", false},
+                                           {":):(:O!!!!!!", false}};
+  for (auto i : http_validate_hdr_field_test_case) {
+    const char *const txt = i.text;
+    if (validate_host_name({txt}) != i.valid) {
+      std::printf("Validation of FQDN (host) header: \"%s\", expected %s, but not\n", txt, (i.valid ? "true" : "false"));
+      CHECK(false);
+    }
+  }
+}
+
+namespace UrlImpl
+{
+bool url_is_strictly_compliant(const char *start, const char *end);
+}
+using namespace UrlImpl;
+
+TEST_CASE("ParseRulesStrictURI", "[proxy][parseuri]")
+{
+  const struct {
+    const char *const uri;
+    bool valid;
+  } http_strict_uri_parsing_test_case[] = {{"/home", true},
+                                           {"/path/data?key=value#id", true},
+                                           {"/ABCDEFGHIJKLMNOPQRSTUVWXYZ", true},
+                                           {"/abcdefghijklmnopqrstuvwxyz", true},
+                                           {"/0123456789", true},
+                                           {":/?#[]@", true},
+                                           {"!$&'()*+,;=", true},
+                                           {"-._~", true},
+                                           {"%", true},
+                                           {"\n", false},
+                                           {"\"", false},
+                                           {"<", false},
+                                           {">", false},
+                                           {"\\", false},
+                                           {"^", false},
+                                           {"`", false},
+                                           {"{", false},
+                                           {"|", false},
+                                           {"}", false},
+                                           {"é", false}};
+
+  for (auto i : http_strict_uri_parsing_test_case) {
+    const char *const uri = i.uri;
+    if (url_is_strictly_compliant(uri, uri + strlen(uri)) != i.valid) {
+      std::printf("Strictly parse URI: \"%s\", expected %s, but not\n", uri, (i.valid ? "true" : "false"));
+      CHECK(false);
+    }
+  }
+}
diff --git a/proxy/hdrs/unit_tests/test_mime.cc b/proxy/hdrs/unit_tests/test_mime.cc
new file mode 100644
index 0000000..0ad7038
--- /dev/null
+++ b/proxy/hdrs/unit_tests/test_mime.cc
@@ -0,0 +1,249 @@
+/** @file
+
+  A brief file description
+
+  @section license License
+
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ */
+
+#include <cstdio>
+
+#include "catch.hpp"
+
+#include "I_EventSystem.h"
+#include "MIME.h"
+
+TEST_CASE("Mime", "[proxy][mime]")
+{
+  MIMEField *field;
+  MIMEHdr hdr;
+  hdr.create(NULL);
+
+  hdr.field_create("Test1", 5);
+  hdr.field_create("Test2", 5);
+  hdr.field_create("Test3", 5);
+  hdr.field_create("Test4", 5);
+  field = hdr.field_create("Test5", 5);
+
+  if (!hdr.m_mime->m_first_fblock.contains(field)) {
+    std::printf("The field block doesn't contain the field but it should\n");
+    CHECK(false);
+  }
+  if (hdr.m_mime->m_first_fblock.contains(field + (1L << 32))) {
+    std::printf("The field block contains the field but it shouldn't\n");
+    CHECK(false);
+  }
+
+  int slot_num = mime_hdr_field_slotnum(hdr.m_mime, field);
+  if (slot_num != 4) {
+    std::printf("Slot number is %d but should be 4\n", slot_num);
+    CHECK(false);
+  }
+
+  slot_num = mime_hdr_field_slotnum(hdr.m_mime, field + (1L << 32));
+  if (slot_num != -1) {
+    std::printf("Slot number is %d but should be -1\n", slot_num);
+    CHECK(false);
+  }
+
+  hdr.destroy();
+}
+
+TEST_CASE("MimeGetHostPortValues", "[proxy][mimeport]")
+{
+  MIMEHdr hdr;
+  hdr.create(NULL);
+
+  const char *header_value;
+  const char *host;
+  int host_len;
+  const char *port;
+  int port_len;
+
+  header_value = "host";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 4) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "host", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 0) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (port != nullptr) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  header_value = "host:";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 4) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "host", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 0) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (port != nullptr) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  header_value = "[host]";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 6) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "[host]", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 0) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (port != nullptr) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  header_value = "host:port";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 4) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "host", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 4) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(port, "port", port_len) != 0) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  header_value = "[host]:port";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 6) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "[host]", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 4) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(port, "port", port_len) != 0) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  header_value = "[host]:";
+  hdr.value_set("Host", 4, header_value, strlen(header_value));
+  hdr.get_host_port_values(&host, &host_len, &port, &port_len);
+  if (host_len != 6) {
+    std::printf("host length doesn't match\n");
+    CHECK(false);
+  }
+  if (strncmp(host, "[host]", host_len) != 0) {
+    std::printf("host string doesn't match\n");
+    CHECK(false);
+  }
+  if (port_len != 0) {
+    std::printf("port length doesn't match\n");
+    CHECK(false);
+  }
+  if (port != nullptr) {
+    std::printf("port string doesn't match\n");
+    CHECK(false);
+  }
+
+  hdr.destroy();
+}
+
+TEST_CASE("MimeParsers", "[proxy][mimeparsers]")
+{
+  const char *end;
+  int value;
+
+  static const std::vector<std::pair<const char *, int>> tests = {{"0", 0},
+                                                                  {"1234", 1234},
+                                                                  {"-1234", -1234},
+                                                                  {"2147483647", 2147483647},
+                                                                  {"-2147483648", 2147483648},
+                                                                  {"2147483648", INT_MAX},
+                                                                  {"-2147483649", INT_MIN},
+                                                                  {"2147483647", INT_MAX},
+                                                                  {"-2147483648", INT_MIN},
+                                                                  {"999999999999", INT_MAX},
+                                                                  {"-999999999999", INT_MIN}};
+
+  for (const auto &it : tests) {
+    auto [buf, val] = it;
+
+    end = buf + strlen(buf);
+    if (mime_parse_int(buf, end) != val) {
+      std::printf("Failed mime_parse_int\n");
+      CHECK(false);
+    }
+    if (!mime_parse_integer(buf, end, &value)) {
+      std::printf("Failed mime_parse_integer call\n");
+      CHECK(false);
+    } else if (value != val) {
+      std::printf("Failed mime_parse_integer value\n");
+      CHECK(false);
+    }
+  }
+
+  // Also check the date parser, which relies heavily on the mime_parse_integer() function
+  const char *date1 = "Sun, 05 Dec 1999 08:49:37 GMT";
+  const char *date2 = "Sunday, 05-Dec-1999 08:49:37 GMT";
+
+  int d1 = mime_parse_date(date1, date1 + strlen(date1));
+  int d2 = mime_parse_date(date2, date2 + strlen(date2));
+
+  if (d1 != d2) {
+    std::printf("Failed mime_parse_date\n");
+    CHECK(false);
+  }
+
+  std::printf("Date1: %d\n", d1);
+  std::printf("Date2: %d\n", d2);
+}