Posted to commits@trafficserver.apache.org by am...@apache.org on 2015/07/19 16:14:41 UTC

[1/8] trafficserver git commit: TS-974: Partial Object Caching.

Repository: trafficserver
Updated Branches:
  refs/heads/poc-6-0-x [created] 1c06db831


http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogAccess.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogAccess.cc b/proxy/logging/LogAccess.cc
index bd2aacb..ecdf7c9 100644
--- a/proxy/logging/LogAccess.cc
+++ b/proxy/logging/LogAccess.cc
@@ -49,7 +49,6 @@
 #include "LogBuffer.h"
 #include "Log.h"
 
-
 /*-------------------------------------------------------------------------
   LogAccess::init
   -------------------------------------------------------------------------*/
@@ -401,7 +400,6 @@ LogAccess::marshal_proxy_host_ip(char *buf)
   return marshal_ip(buf, &Machine::instance()->ip.sa);
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -477,7 +475,6 @@ LogAccess::marshal_server_resp_time_s(char *buf)
   DEFAULT_INT_FIELD;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -565,7 +562,6 @@ LogAccess::marshal_http_header_field(LogField::Container /* container ATS_UNUSED
   DEFAULT_STR_FIELD;
 }
 
-
 /*-------------------------------------------------------------------------
 
   -------------------------------------------------------------------------*/
@@ -769,11 +765,9 @@ LogAccess::marshal_record(char *record, char *buf)
   ink_assert(num_chars <= max_chars);
   memcpy(buf, out_buf, num_chars);
 
-
   return max_chars;
 }
 
-
 /*-------------------------------------------------------------------------
   LogAccess::marshal_str
 
@@ -1011,7 +1005,8 @@ LogAccess::unmarshal_int_to_str(char **buf, char *dest, int len)
 /*-------------------------------------------------------------------------
   LogAccess::unmarshal_int_to_str_hex
 
-  Return the string representation (hexadecimal) of the integer pointed at by buf.
+  Return the string representation (hexadecimal) of the integer pointed at by
+  buf.
   -------------------------------------------------------------------------*/
 
 int
@@ -1032,7 +1027,6 @@ LogAccess::unmarshal_int_to_str_hex(char **buf, char *dest, int len)
   return -1;
 }
 
-
 /*-------------------------------------------------------------------------
   LogAccess::unmarshal_str
 
@@ -1326,7 +1320,6 @@ LogAccess::unmarshal_finish_status(char **buf, char *dest, int len, Ptr<LogField
   return (LogAccess::unmarshal_with_map(unmarshal_int(buf), dest, len, map, "UNKNOWN_FINISH_CODE"));
 }
 
-
 /*-------------------------------------------------------------------------
   LogAccess::unmarshal_cache_code
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogAccessHttp.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogAccessHttp.cc b/proxy/logging/LogAccessHttp.cc
index a0a5689..7b90469 100644
--- a/proxy/logging/LogAccessHttp.cc
+++ b/proxy/logging/LogAccessHttp.cc
@@ -111,7 +111,8 @@ LogAccessHttp::init()
       //
       // here is the assert
       //
-      // assert (m_proxy_resp_content_type_str[0] >= 'A' && m_proxy_resp_content_type_str[0] <= 'z');
+      // assert (m_proxy_resp_content_type_str[0] >= 'A' &&
+      // m_proxy_resp_content_type_str[0] <= 'z');
       LogUtils::remove_content_type_attributes(m_proxy_resp_content_type_str, &m_proxy_resp_content_type_len);
     } else {
       // If Content-Type field is missing, check for @Content-Type
@@ -136,7 +137,8 @@ LogAccessHttp::init()
 /*-------------------------------------------------------------------------
   The set routines ...
 
-  These routines are used by the WIPE_FIELD_VALUE filter to replace the original req url
+  These routines are used by the WIPE_FIELD_VALUE filter to replace the original
+  req url
   strings with the WIPED req strings.
   -------------------------------------------------------------------------*/
 
@@ -188,14 +190,14 @@ LogAccessHttp::set_client_req_unmapped_url_host(char *buf, int len)
 void
 LogAccessHttp::set_client_req_url_path(char *buf, int len)
 {
-  //?? use m_client_req_unmapped_url_path_str for now..may need to enhance later..
+  //?? use m_client_req_unmapped_url_path_str for now..may need to enhance
+  // later..
   if (buf) {
     m_client_req_url_path_len = len;
     ink_strlcpy(m_client_req_unmapped_url_path_str, buf, m_client_req_url_path_len + 1);
   }
 }
 
-
 /*-------------------------------------------------------------------------
   The marshalling routines ...
 
@@ -289,7 +291,6 @@ LogAccessHttp::marshal_client_auth_user_name(char *buf)
   return len;
 }
 
-
 /*-------------------------------------------------------------------------
   Private utility function to validate m_client_req_unmapped_url_canon_str &
   m_client_req_unmapped_url_canon_len fields.
@@ -349,7 +350,6 @@ LogAccessHttp::validate_unmapped_url_path(void)
   }
 }
 
-
 /*-------------------------------------------------------------------------
   Private utility function to validate m_cache_lookup_url_canon_str &
   m_cache_lookup__url_canon_len fields.
@@ -447,7 +447,6 @@ LogAccessHttp::marshal_client_req_url_canon(char *buf)
   return len;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -861,7 +860,6 @@ LogAccessHttp::marshal_server_host_ip(char *buf)
   return marshal_ip(buf, ip);
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -884,7 +882,6 @@ LogAccessHttp::marshal_server_host_name(char *buf)
   return padded_len;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -1041,7 +1038,6 @@ LogAccessHttp::marshal_cache_resp_http_version(char *buf)
   return (2 * INK_MIN_ALIGN);
 }
 
-
 int
 LogAccessHttp::marshal_client_retry_after_time(char *buf)
 {
@@ -1085,7 +1081,6 @@ convert_cache_write_code(HttpTransact::CacheWriteStatus_t t)
   return code;
 }
 
-
 int
 LogAccessHttp::marshal_cache_write_code(char *buf)
 {
@@ -1108,7 +1103,6 @@ LogAccessHttp::marshal_cache_write_transform_code(char *buf)
   return INK_MIN_ALIGN;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogAccessHttp.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogAccessHttp.h b/proxy/logging/LogAccessHttp.h
index 93731b8..4093592 100644
--- a/proxy/logging/LogAccessHttp.h
+++ b/proxy/logging/LogAccessHttp.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_ACCESS_HTTP_H
 #define LOG_ACCESS_HTTP_H
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogAccessICP.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogAccessICP.h b/proxy/logging/LogAccessICP.h
index 2fdd49a..7e293c3 100644
--- a/proxy/logging/LogAccessICP.h
+++ b/proxy/logging/LogAccessICP.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_ACCESS_ICP_H
 #define LOG_ACCESS_ICP_H
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogAccessTest.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogAccessTest.h b/proxy/logging/LogAccessTest.h
index c5a62ce..103597c 100644
--- a/proxy/logging/LogAccessTest.h
+++ b/proxy/logging/LogAccessTest.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_ACCESS_TEST_H
 #define LOG_ACCESS_TEST_H
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogBuffer.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogBuffer.cc b/proxy/logging/LogBuffer.cc
index 6292498..737fc80 100644
--- a/proxy/logging/LogBuffer.cc
+++ b/proxy/logging/LogBuffer.cc
@@ -45,7 +45,6 @@
 #include "LogBuffer.h"
 #include "Log.h"
 
-
 struct FieldListCacheElement {
   LogFieldList *fieldlist;
   char *symbol_str;
@@ -342,7 +341,6 @@ LogBuffer::checkin_write(size_t write_offset)
   return ret_val;
 }
 
-
 unsigned
 LogBuffer::add_header_str(const char *str, char *buf_ptr, unsigned buf_len)
 {
@@ -356,7 +354,6 @@ LogBuffer::add_header_str(const char *str, char *buf_ptr, unsigned buf_len)
   return len;
 }
 
-
 size_t
 LogBuffer::_add_buffer_header()
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogBuffer.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogBuffer.h b/proxy/logging/LogBuffer.h
index 160aac0..bcaac54 100644
--- a/proxy/logging/LogBuffer.h
+++ b/proxy/logging/LogBuffer.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_BUFFER_H
 #define LOG_BUFFER_H
 
@@ -95,7 +94,6 @@ struct LogBufferHeader {
   char *log_filename();
 };
 
-
 union LB_State {
   LB_State() : ival(0) {}
 
@@ -197,7 +195,8 @@ public:
   static void
   destroy(LogBuffer *lb)
   {
-    // ink_atomic_increment() returns the previous value, so when it was 1, we are
+    // ink_atomic_increment() returns the previous value, so when it was 1, we
+    // are
     // the thread that decremented to zero and should delete ...
     int refcnt = ink_atomic_increment(&lb->m_references, -1);
 
@@ -214,7 +213,8 @@ private:
   size_t m_size;                    // the buffer size
   size_t m_buf_align;               // the buffer alignment
   size_t m_write_align;             // the write alignment mask
-  int m_buffer_fast_allocator_size; // indicates whether the logbuffer is allocated from ioBuf
+  int m_buffer_fast_allocator_size; // indicates whether the logbuffer is
+                                    // allocated from ioBuf
 
   long m_expiration_time; // buffer expiration time
 
@@ -293,7 +293,6 @@ private:
   LogBufferIterator &operator=(const LogBufferIterator &);
 };
 
-
 /*-------------------------------------------------------------------------
   LogBufferIterator
 
@@ -320,7 +319,6 @@ inline LogBufferIterator::LogBufferIterator(LogBufferHeader *header, bool in_net
   }
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 

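Aside: the reference-counting idiom in LogBuffer::destroy() above is easy to misread after the re-wrap. Here is a minimal sketch using standard C++ atomics (an assumption for illustration; ATS uses its own ink_atomic_increment() wrapper, which likewise returns the value held before the update):

  #include <atomic>

  // Illustrative stand-in for the refcounted buffer; not the ATS types.
  struct Buffer {
    std::atomic<int> m_references{1};
  };

  // Same logic as destroy(): the decrement primitive returns the value held
  // *before* the decrement, so seeing 1 means this caller took the count to
  // zero and is the one thread that must delete the object.
  void release(Buffer *lb) {
    int refcnt = lb->m_references.fetch_sub(1, std::memory_order_acq_rel);
    if (refcnt == 1) {
      delete lb;
    }
  }
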
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogCollationHostSM.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogCollationHostSM.cc b/proxy/logging/LogCollationHostSM.cc
index dc73d87..6f5c249 100644
--- a/proxy/logging/LogCollationHostSM.cc
+++ b/proxy/logging/LogCollationHostSM.cc
@@ -284,7 +284,8 @@ LogCollationHostSM::host_recv(int event, void * /* data ATS_UNUSED */)
       log_buffer_header = (LogBufferHeader *)m_read_buffer;
 
       // convert the buffer we just received to host order
-      // TODO: We currently don't try to make the log buffers handle little vs big endian. TS-1156.
+      // TODO: We currently don't try to make the log buffers handle little vs
+      // big endian. TS-1156.
       // LogBuffer::convert_to_host_order(log_buffer_header);
 
       version = log_buffer_header->version;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogConfig.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogConfig.h b/proxy/logging/LogConfig.h
index 6143d91..23e7d57 100644
--- a/proxy/logging/LogConfig.h
+++ b/proxy/logging/LogConfig.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_CONFIG_H
 #define LOG_CONFIG_H
 
@@ -39,7 +38,6 @@ enum {
   log_stat_event_log_error_aggr_stat,
   log_stat_event_log_error_full_stat,
   log_stat_event_log_error_fail_stat,
-
   log_stat_event_log_access_ok_stat,
   log_stat_event_log_access_skip_stat,
   log_stat_event_log_access_aggr_stat,
@@ -52,12 +50,10 @@ enum {
   log_stat_num_received_from_network_stat,
   log_stat_num_flush_to_disk_stat,
   log_stat_num_lost_before_flush_to_disk_stat,
-
   log_stat_bytes_lost_before_preproc_stat,
   log_stat_bytes_sent_to_network_stat,
   log_stat_bytes_lost_before_sent_to_network_stat,
   log_stat_bytes_received_from_network_stat,
-
   log_stat_bytes_flush_to_disk_stat,
   log_stat_bytes_lost_before_flush_to_disk_stat,
   log_stat_bytes_written_to_disk_stat,
@@ -66,7 +62,6 @@ enum {
   // Logging I/O
   log_stat_log_files_open_stat,
   log_stat_log_files_space_used_stat,
-
   log_stat_count
 };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogField.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogField.cc b/proxy/logging/LogField.cc
index 64d01b0..81ef288 100644
--- a/proxy/logging/LogField.cc
+++ b/proxy/logging/LogField.cc
@@ -405,7 +405,6 @@ LogField::set_aggregate_op(LogField::Aggregate agg_op)
   }
 }
 
-
 void
 LogField::update_aggregate(int64_t val)
 {
@@ -439,7 +438,6 @@ LogField::update_aggregate(int64_t val)
         m_symbol, val, m_agg_val, m_agg_cnt);
 }
 
-
 LogField::Container
 LogField::valid_container_name(char *name)
 {
@@ -451,7 +449,6 @@ LogField::valid_container_name(char *name)
   return LogField::NO_CONTAINER;
 }
 
-
 LogField::Aggregate
 LogField::valid_aggregate_name(char *name)
 {
@@ -463,7 +460,6 @@ LogField::valid_aggregate_name(char *name)
   return LogField::NO_AGGREGATE;
 }
 
-
 bool
 LogField::fieldlist_contains_aggregates(char *fieldlist)
 {
@@ -476,7 +472,6 @@ LogField::fieldlist_contains_aggregates(char *fieldlist)
   return contains_aggregates;
 }
 
-
 /*-------------------------------------------------------------------------
   LogFieldList
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogField.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogField.h b/proxy/logging/LogField.h
index 09562da..36dc081 100644
--- a/proxy/logging/LogField.h
+++ b/proxy/logging/LogField.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_FIELD_H
 #define LOG_FIELD_H
 
@@ -58,7 +57,6 @@ struct LogSlice {
   int toStrOffset(int strlen, int *offset);
 };
 
-
 /*-------------------------------------------------------------------------
   LogField
 
@@ -79,7 +77,6 @@ public:
   typedef int (*UnmarshalFuncWithMap)(char **buf, char *dest, int len, Ptr<LogFieldAliasMap> map);
   typedef void (LogAccess::*SetFunc)(char *buf, int len);
 
-
   enum Type {
     sINT = 0,
     dINT,

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFieldAliasMap.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFieldAliasMap.h b/proxy/logging/LogFieldAliasMap.h
index e355232..9edadd7 100644
--- a/proxy/logging/LogFieldAliasMap.h
+++ b/proxy/logging/LogFieldAliasMap.h
@@ -22,11 +22,11 @@
 
   @section description
   This file implements an abstract class to map between numbers of type IntType
-  and strings. The purpose is to obtain one representation from the other so that
+  and strings. The purpose is to obtain one representation from the other so
+  that
   easily remembered names can be used to refer to log fields of integer type.
  */
 
-
 #ifndef LOG_FIELD_ALIAS_MAP_H
 #define LOG_FIELD_ALIAS_MAP_H
 
@@ -71,7 +71,6 @@ any memory the map may have allocated.
 
  *****************************************************************************/
 
-
 class LogFieldAliasMap : public RefCountObj
 {
 public:
@@ -212,6 +211,5 @@ public:
   }
 };
 
-
 // LOG_FIELD_ALIAS_MAP_H
 #endif

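Aside: the header comment above describes LogFieldAliasMap as a two-way mapping between integer log-field values and easily remembered names. A minimal sketch of that idea follows (standard containers only; the real class is a reference-counted abstract base, and the member names below are hypothetical):

  #include <map>
  #include <string>

  // Two maps kept in sync so an integer field value and its mnemonic name can
  // each be looked up from the other.
  class IntStringAlias {
  public:
    void add(int value, const std::string &name) {
      to_name_[value] = name;
      to_value_[name] = value;
    }
    const std::string *name_of(int value) const {
      auto it = to_name_.find(value);
      return it == to_name_.end() ? nullptr : &it->second;
    }
    const int *value_of(const std::string &name) const {
      auto it = to_value_.find(name);
      return it == to_value_.end() ? nullptr : &it->second;
    }

  private:
    std::map<int, std::string> to_name_;
    std::map<std::string, int> to_value_;
  };
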
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFile.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFile.cc b/proxy/logging/LogFile.cc
index 5e44093..708b213 100644
--- a/proxy/logging/LogFile.cc
+++ b/proxy/logging/LogFile.cc
@@ -734,7 +734,8 @@ LogFile::check_fd()
   stat_check_count++;
 
   int err = open_file();
-  // XXX if open_file() returns, LOG_FILE_FILESYSTEM_CHECKS_FAILED, raise a more informative alarm ...
+  // XXX if open_file() returns, LOG_FILE_FILESYSTEM_CHECKS_FAILED, raise a more
+  // informative alarm ...
   if (err != LOG_FILE_NO_ERROR && err != LOG_FILE_NO_PIPE_READERS) {
     if (!failure_last_call) {
       LogUtils::manager_alarm(LogUtils::LOG_ALARM_ERROR, "Traffic Server could not open logfile %s.", m_name);
@@ -757,7 +758,6 @@ LogFile::display(FILE *fd)
  LogFileList IS NOT USED
 ****************************************************************************/
 
-
 /****************************************************************************
 
   MetaInfo methods

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFile.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFile.h b/proxy/logging/LogFile.h
index 2e345a5..a8febf0 100644
--- a/proxy/logging/LogFile.h
+++ b/proxy/logging/LogFile.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_FILE_H
 #define LOG_FILE_H
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFilter.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFilter.cc b/proxy/logging/LogFilter.cc
index abdf66f..fb0d53c 100644
--- a/proxy/logging/LogFilter.cc
+++ b/proxy/logging/LogFilter.cc
@@ -68,7 +68,6 @@ LogFilter::~LogFilter()
   delete m_field;
 }
 
-
 /*-------------------------------------------------------------------------
   LogFilterString::LogFilterString
   -------------------------------------------------------------------------*/
@@ -95,7 +94,6 @@ LogFilterString::_setValues(size_t n, char **value)
   }
 }
 
-
 LogFilterString::LogFilterString(const char *name, LogField *field, LogFilter::Action action, LogFilter::Operator oper,
                                  char *values)
   : LogFilter(name, field, action, oper)
@@ -246,7 +244,6 @@ LogFilterString::wipe_this_entry(LogAccess *lad)
   return cond_satisfied;
 }
 
-
 /*-------------------------------------------------------------------------
   LogFilterString::toss_this_entry
 
@@ -655,7 +652,9 @@ LogFilterIP::LogFilterIP(const char *name, LogField *field, LogFilter::Action ac
       if (0 == min.load(t)) {
         if (x) {
           if (0 != max.load(x)) {
-            Warning("LogFilterIP Configuration: '%s-%s' looks like a range but the second address was ill formed", t, x);
+            Warning("LogFilterIP Configuration: '%s-%s' looks like a range but "
+                    "the second address was ill formed",
+                    t, x);
             continue;
           }
         } else {
@@ -744,8 +743,10 @@ LogFilterIP::is_match(LogAccess *lad)
   if (m_field && lad) {
     LogFieldIpStorage value;
     m_field->marshal(lad, reinterpret_cast<char *>(&value));
-    // This is bad, we abuse the fact that the initial layout of LogFieldIpStorage and IpAddr
-    // are identical. We should look at converting the log stuff to use IpAddr directly.
+    // This is bad, we abuse the fact that the initial layout of
+    // LogFieldIpStorage and IpAddr
+    // are identical. We should look at converting the log stuff to use IpAddr
+    // directly.
     zret = m_map.contains(reinterpret_cast<IpAddr &>(value));
   }
 
@@ -817,7 +818,6 @@ LogFilterIP::display(FILE *fd)
   }
 }
 
-
 void
 LogFilterIP::display_as_XML(FILE *fd)
 {
@@ -957,7 +957,6 @@ LogFilterList::wipe_this_entry(LogAccess *lad)
   return wipeFlag;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
@@ -999,7 +998,6 @@ LogFilterList::find_by_name(char *name)
   return NULL;
 }
 
-
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFilter.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFilter.h b/proxy/logging/LogFilter.h
index 8a82a68..b3be370 100644
--- a/proxy/logging/LogFilter.h
+++ b/proxy/logging/LogFilter.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_FILTER_H
 #define LOG_FILTER_H
 
@@ -247,7 +246,6 @@ private:
 
 bool filters_are_equal(LogFilter *filt1, LogFilter *filt2);
 
-
 /*-------------------------------------------------------------------------
   LogFilterList
   -------------------------------------------------------------------------*/
@@ -306,7 +304,6 @@ private:
   LogFilterList &operator=(const LogFilterList &rhs);
 };
 
-
 /*-------------------------------------------------------------------------
   Inline functions
   -------------------------------------------------------------------------*/

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFormat.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFormat.cc b/proxy/logging/LogFormat.cc
index 069211c..4c4723d 100644
--- a/proxy/logging/LogFormat.cc
+++ b/proxy/logging/LogFormat.cc
@@ -95,7 +95,8 @@ LogFormat::setup(const char *name, const char *format_str, unsigned interval_sec
     return true;
   }
 
-  // We don't have a format string (ie. this will be a raw text log, so we are always valid.
+  // We don't have a format string (ie. this will be a raw text log, so we are
+  // always valid.
   m_valid = true;
   return true;
 }
@@ -188,8 +189,10 @@ LogFormat::LogFormat(const char *name, const char *format_str, unsigned interval
 {
   setup(name, format_str, interval_sec);
 
-  // A LOG_FORMAT_TEXT is a log without a format string, everything else is a LOG_FORMAT_CUSTOM. It's possible that we could get
-  // rid of log types altogether, but LogFile currently tests whether a format is a LOG_FORMAT_TEXT format ...
+  // A LOG_FORMAT_TEXT is a log without a format string, everything else is a
+  // LOG_FORMAT_CUSTOM. It's possible that we could get
+  // rid of log types altogether, but LogFile currently tests whether a format
+  // is a LOG_FORMAT_TEXT format ...
   m_format_type = format_str ? LOG_FORMAT_CUSTOM : LOG_FORMAT_TEXT;
 }
 
@@ -574,7 +577,9 @@ LogFormat::parse_escape_string(const char *str, int len)
     sum = (a - '0') * 64 + (b - '0') * 8 + (c - '0');
 
     if (sum == 0 || sum >= 255) {
-      Warning("Octal escape sequence out of range: \\%c%c%c, treat it as normal string\n", a, b, c);
+      Warning("Octal escape sequence out of range: \\%c%c%c, treat it as "
+              "normal string\n",
+              a, b, c);
       return -1;
     } else
       return sum;
@@ -594,7 +599,9 @@ LogFormat::parse_escape_string(const char *str, int len)
     sum = i * 16 + j;
 
     if (sum == 0 || sum >= 255) {
-      Warning("Hex escape sequence out of range: \\%c%c%c, treat it as normal string\n", a, b, c);
+      Warning("Hex escape sequence out of range: \\%c%c%c, treat it as normal "
+              "string\n",
+              a, b, c);
       return -1;
     } else
       return sum;

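Aside: the two Warning() call sites above come from LogFormat::parse_escape_string(), which converts a "\abc" octal (or "\xab" hex) escape and rejects results of 0 or >= 255. The octal arithmetic, pulled out with a worked example (standalone sketch, not the ATS function):

  #include <cstdio>

  // "\abc" in octal becomes (a-'0')*64 + (b-'0')*8 + (c-'0'); out-of-range
  // values (0 or >= 255) are treated as a normal string, reported here as -1.
  static int octal_escape(char a, char b, char c) {
    int sum = (a - '0') * 64 + (b - '0') * 8 + (c - '0');
    return (sum == 0 || sum >= 255) ? -1 : sum;
  }

  int main() {
    int v = octal_escape('1', '0', '1'); // "\101" -> 64 + 0 + 1 = 65, i.e. 'A'
    std::printf("\\101 -> %d ('%c')\n", v, static_cast<char>(v));
    return 0;
  }
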
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogFormat.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogFormat.h b/proxy/logging/LogFormat.h
index 39ee864..4c822eb 100644
--- a/proxy/logging/LogFormat.h
+++ b/proxy/logging/LogFormat.h
@@ -31,7 +31,8 @@
 #include "InkXml.h"
 
 enum LogFormatType {
-  // We start the numbering at 4 to compatibility with Traffic Server 4.x, which used
+  // We start the numbering at 4 to compatibility with Traffic Server 4.x, which
+  // used
   // to have the predefined log formats enumerated above ...
   LOG_FORMAT_CUSTOM = 4,
   LOG_FORMAT_TEXT = 5

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogObject.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogObject.cc b/proxy/logging/LogObject.cc
index 3a26962..1483e96 100644
--- a/proxy/logging/LogObject.cc
+++ b/proxy/logging/LogObject.cc
@@ -28,7 +28,6 @@
  ***************************************************************************/
 #include "libts.h"
 
-
 #include "Error.h"
 #include "P_EventSystem.h"
 #include "LogUtils.h"
@@ -248,7 +247,6 @@ LogObject::generate_filenames(const char *log_dir, const char *basename, LogFile
   m_basename[basename_len - 1] = 0;
 }
 
-
 void
 LogObject::rename(char *new_name)
 {
@@ -261,7 +259,6 @@ LogObject::rename(char *new_name)
   m_logFile->change_name(new_name);
 }
 
-
 void
 LogObject::add_filter(LogFilter *filter, bool copy)
 {
@@ -271,7 +268,6 @@ LogObject::add_filter(LogFilter *filter, bool copy)
   m_filter_list.add(filter, copy);
 }
 
-
 void
 LogObject::set_filter_list(const LogFilterList &list, bool copy)
 {
@@ -284,7 +280,6 @@ LogObject::set_filter_list(const LogFilterList &list, bool copy)
   m_filter_list.set_conjunction(list.does_conjunction());
 }
 
-
 void
 LogObject::add_loghost(LogHost *host, bool copy)
 {
@@ -300,7 +295,6 @@ LogObject::add_loghost(LogHost *host, bool copy)
   m_logFile.clear();
 }
 
-
 // we conpute the object signature from the fieldlist_str and the printf_str
 // of the LogFormat rather than from the format_str because the format_str
 // is not part of a LogBuffer header
@@ -328,7 +322,6 @@ LogObject::compute_signature(LogFormat *format, char *filename, unsigned int fla
   return signature;
 }
 
-
 void
 LogObject::display(FILE *fd)
 {
@@ -346,7 +339,6 @@ LogObject::display(FILE *fd)
   fprintf(fd, "++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
 }
 
-
 void
 LogObject::displayAsXML(FILE *fd, bool extended)
 {
@@ -375,7 +367,6 @@ LogObject::displayAsXML(FILE *fd, bool extended)
   fprintf(fd, "</LogObject>\n");
 }
 
-
 LogBuffer *
 LogObject::_checkout_write(size_t *write_offset, size_t bytes_needed)
 {
@@ -491,7 +482,6 @@ LogObject::_checkout_write(size_t *write_offset, size_t bytes_needed)
   return buffer;
 }
 
-
 int
 LogObject::va_log(LogAccess *lad, const char *fmt, va_list ap)
 {
@@ -631,7 +621,6 @@ LogObject::log(LogAccess *lad, const char *text_entry)
   return Log::LOG_OK;
 }
 
-
 void
 LogObject::_setup_rolling(Log::RollingEnabledValues rolling_enabled, int rolling_interval_sec, int rolling_offset_hr,
                           int rolling_size_mb)
@@ -697,7 +686,6 @@ LogObject::_setup_rolling(Log::RollingEnabledValues rolling_enabled, int rolling
   }
 }
 
-
 unsigned
 LogObject::roll_files(long time_now)
 {
@@ -757,7 +745,6 @@ LogObject::roll_files(long time_now)
   return num_rolled;
 }
 
-
 unsigned
 LogObject::_roll_files(long last_roll_time, long time_now)
 {
@@ -781,7 +768,6 @@ LogObject::_roll_files(long last_roll_time, long time_now)
   return num_rolled;
 }
 
-
 void
 LogObject::check_buffer_expiration(long time_now)
 {
@@ -791,7 +777,6 @@ LogObject::check_buffer_expiration(long time_now)
   }
 }
 
-
 // make sure that we will be able to write the logs to the disk
 //
 int
@@ -804,7 +789,6 @@ LogObject::do_filesystem_checks()
   }
 }
 
-
 /*-------------------------------------------------------------------------
   TextLogObject::TextLogObject
   -------------------------------------------------------------------------*/
@@ -821,7 +805,6 @@ TextLogObject::TextLogObject(const char *name, const char *log_dir, bool timesta
   }
 }
 
-
 /*-------------------------------------------------------------------------
   TextLogObject::write
 
@@ -845,7 +828,6 @@ TextLogObject::write(const char *format, ...)
   return ret_val;
 }
 
-
 /*-------------------------------------------------------------------------
   TextLogObject::va_write
 
@@ -861,7 +843,6 @@ TextLogObject::va_write(const char *format, va_list ap)
   return this->va_log(NULL, format, ap);
 }
 
-
 /*-------------------------------------------------------------------------
   LogObjectManager
   -------------------------------------------------------------------------*/
@@ -949,7 +930,6 @@ LogObjectManager::_manage_object(LogObject *log_object, bool is_api_object, int
   return retVal;
 }
 
-
 int
 LogObjectManager::_solve_filename_conflicts(LogObject *log_object, int maxConflicts)
 {
@@ -1055,7 +1035,6 @@ LogObjectManager::_solve_filename_conflicts(LogObject *log_object, int maxConfli
   return retVal;
 }
 
-
 bool
 LogObjectManager::_has_internal_filename_conflict(const char *filename, LogObjectList &objects)
 {
@@ -1073,7 +1052,6 @@ LogObjectManager::_has_internal_filename_conflict(const char *filename, LogObjec
   return false;
 }
 
-
 int
 LogObjectManager::_solve_internal_filename_conflicts(LogObject *log_object, int maxConflicts, int fileNum)
 {
@@ -1098,7 +1076,6 @@ LogObjectManager::_solve_internal_filename_conflicts(LogObject *log_object, int
   return retVal;
 }
 
-
 LogObject *
 LogObjectManager::get_object_with_signature(uint64_t signature)
 {
@@ -1112,7 +1089,6 @@ LogObjectManager::get_object_with_signature(uint64_t signature)
   return NULL;
 }
 
-
 void
 LogObjectManager::check_buffer_expiration(long time_now)
 {
@@ -1149,7 +1125,6 @@ LogObjectManager::preproc_buffers(int idx)
   return buffers_preproced;
 }
 
-
 bool
 LogObjectManager::unmanage_api_object(LogObject *logObject)
 {
@@ -1158,7 +1133,8 @@ LogObjectManager::unmanage_api_object(LogObject *logObject)
   if (this->_APIobjects.in(logObject)) {
     this->_APIobjects.remove(logObject);
 
-    // Force a buffer flush, then schedule this LogObject to be deleted on the eventProcessor.
+    // Force a buffer flush, then schedule this LogObject to be deleted on the
+    // eventProcessor.
     logObject->force_new_buffer();
     new_Derefer(logObject, HRTIME_SECONDS(60));
 
@@ -1170,7 +1146,6 @@ LogObjectManager::unmanage_api_object(LogObject *logObject)
   return false;
 }
 
-
 void
 LogObjectManager::add_filter_to_all(LogFilter *filter)
 {
@@ -1179,7 +1154,6 @@ LogObjectManager::add_filter_to_all(LogFilter *filter)
   }
 }
 
-
 void
 LogObjectManager::open_local_pipes()
 {
@@ -1194,7 +1168,6 @@ LogObjectManager::open_local_pipes()
   }
 }
 
-
 void
 LogObjectManager::transfer_objects(LogObjectManager &old_mgr)
 {
@@ -1214,7 +1187,8 @@ LogObjectManager::transfer_objects(LogObjectManager &old_mgr)
     }
   }
 
-  // Transfer the API objects from the old manager. The old manager will retain its refcount.
+  // Transfer the API objects from the old manager. The old manager will retain
+  // its refcount.
   for (unsigned i = 0; i < old_mgr._APIobjects.length(); ++i) {
     manage_api_object(old_mgr._APIobjects[i]);
   }
@@ -1225,8 +1199,10 @@ LogObjectManager::transfer_objects(LogObjectManager &old_mgr)
 
     Debug("log-config-transfer", "examining existing object %s", old_obj->get_base_filename());
 
-    // See if any of the new objects is just a copy of an old one. If so, transfer the
-    // old one to the new manager and delete the new one. We don't use Vec::in here because
+    // See if any of the new objects is just a copy of an old one. If so,
+    // transfer the
+    // old one to the new manager and delete the new one. We don't use Vec::in
+    // here because
     // we need to compare the object hash, not the pointers.
     for (unsigned j = 0; j < _objects.length(); j++) {
       new_obj = _objects[j];
@@ -1389,8 +1365,10 @@ REGRESSION_TEST(LogObjectManager_Transfer)(RegressionTest *t, int /* atype ATS_U
 {
   TestBox box(t, pstatus);
 
-  // There used to be a lot of confusion around whether LogObjects were owned by ome or more LogObjectManager
-  // objects, or handed off to static storage in the Log class. This test just verifies that this is no longer
+  // There used to be a lot of confusion around whether LogObjects were owned by
+  // ome or more LogObjectManager
+  // objects, or handed off to static storage in the Log class. This test just
+  // verifies that this is no longer
   // the case.
   {
     LogObjectManager mgr1;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogObject.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogObject.h b/proxy/logging/LogObject.h
index 649ad4f..f9f9892 100644
--- a/proxy/logging/LogObject.h
+++ b/proxy/logging/LogObject.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_OBJECT_H
 #define LOG_OBJECT_H
 
@@ -83,7 +82,8 @@ public:
   size_t preproc_buffers(LogBufferSink *sink);
 };
 
-// LogObject is atomically reference counted, and the reference count is always owned by
+// LogObject is atomically reference counted, and the reference count is always
+// owned by
 // one or more LogObjectManagers.
 class LogObject : public RefCountObj
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogSock.h
----------------------------------------------------------------------
diff --git a/proxy/logging/LogSock.h b/proxy/logging/LogSock.h
index a80cf2b..40bcd3a 100644
--- a/proxy/logging/LogSock.h
+++ b/proxy/logging/LogSock.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef LOG_SOCK_H
 #define LOG_SOCK_H
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogStandalone.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogStandalone.cc b/proxy/logging/LogStandalone.cc
index ebac47c..b530b32 100644
--- a/proxy/logging/LogStandalone.cc
+++ b/proxy/logging/LogStandalone.cc
@@ -61,7 +61,6 @@ char error_tags[1024] = "";
 char action_tags[1024] = "";
 char command_string[512] = "";
 
-
 // Diags *diags = NULL;
 DiagsConfig *diagsConfig = NULL;
 HttpBodyFactory *body_factory = NULL;
@@ -138,7 +137,6 @@ initialize_process_manager()
   //                         RECP_NON_PERSISTENT);
 }
 
-
 /*-------------------------------------------------------------------------
   check_lockfile
   -------------------------------------------------------------------------*/

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/LogUtils.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/LogUtils.cc b/proxy/logging/LogUtils.cc
index 4099f54..77af53d 100644
--- a/proxy/logging/LogUtils.cc
+++ b/proxy/logging/LogUtils.cc
@@ -46,7 +46,6 @@
 #include "LogUtils.h"
 #include "LogLimits.h"
 
-
 /*-------------------------------------------------------------------------
   LogUtils::timestamp_to_str
 
@@ -394,11 +393,11 @@ LogUtils::remove_content_type_attributes(char *type_str, int *type_len)
   }
 }
 
-
 /*-------------------------------------------------------------------------
   LogUtils::timestamp_to_hex_str
 
-  This routine simply writes the given timestamp integer [time_t] in the equivalent
+  This routine simply writes the given timestamp integer [time_t] in the
+  equivalent
   hexadecimal string format "xxxxxxxxxx" into the provided buffer [buf] of
   size [bufLen].
 
@@ -457,7 +456,6 @@ LogUtils::seconds_to_next_roll(time_t time_now, int rolling_offset, int rolling_
   return ((tr >= sidl ? (tr - sidl) % rolling_interval : (86400 - (sidl - tr)) % rolling_interval));
 }
 
-
 // Checks if the file pointed to by full_filename either is a regular
 // file or a pipe and has write permission, or, if the file does not
 // exist, if the path prefix of full_filename names a directory that

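Aside: the comment above describes LogUtils::timestamp_to_hex_str() as writing a time_t into a caller-provided buffer as a fixed-width hexadecimal string "xxxxxxxxxx". A minimal sketch of that behaviour under those assumptions (not the ATS implementation; the error handling here is illustrative):

  #include <cstdio>
  #include <ctime>

  // Render a time_t as a zero-padded, 10-digit hexadecimal string.
  int timestamp_to_hex_str(std::time_t t, char *buf, std::size_t buf_len) {
    int n = std::snprintf(buf, buf_len, "%010llx", static_cast<unsigned long long>(t));
    return (n < 0 || static_cast<std::size_t>(n) >= buf_len) ? -1 : 0; // -1 on error/truncation
  }

  int main() {
    char buf[16];
    if (timestamp_to_hex_str(std::time(nullptr), buf, sizeof(buf)) == 0) {
      std::printf("%s\n", buf); // prints the current time as ten hex digits
    }
    return 0;
  }
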
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logstats.cc
----------------------------------------------------------------------
diff --git a/proxy/logstats.cc b/proxy/logstats.cc
index 7c24eb3..e0b64af 100644
--- a/proxy/logstats.cc
+++ b/proxy/logstats.cc
@@ -69,7 +69,6 @@ const int DEFAULT_LINE_LEN = 78;
 const double LOG10_1024 = 3.0102999566398116;
 const int MAX_ORIG_STRING = 4096;
 
-
 // Optimizations for "strcmp()", treat some fixed length (3 or 4 bytes) strings
 // as integers.
 const int GET_AS_INT = 5522759;
@@ -103,7 +102,6 @@ struct LastState {
 };
 static LastState last_state;
 
-
 // Store the collected counters and stats, per Origin Server, URL or total
 struct StatsCounter {
   int64_t count;
@@ -446,7 +444,8 @@ public:
         }
         ats_free(const_cast<char *>(l->url)); // We no longer own this string.
       } else {
-        l = _stack.insert(l, UrlStats()); // This seems faster than having a static "template" ...
+        l = _stack.insert(l, UrlStats()); // This seems faster than having a
+                                          // static "template" ...
       }
 
       // Setup this URL stat
@@ -554,7 +553,6 @@ private:
   LruStack::iterator _cur;
 };
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Globals, holding the accumulated stats (ok, I'm lazy ...)
 static OriginStats totals;
@@ -671,7 +669,6 @@ CommandLineArgs::parse_arguments(const char **argv)
   }
 }
 
-
 // Enum for return code levels.
 enum ExitLevel {
   EXIT_OK = 0,
@@ -708,7 +705,6 @@ struct ExitStatus {
   }
 };
 
-
 // Enum for parsing a log line
 enum ParseStates {
   P_STATE_ELAPSED,
@@ -748,7 +744,6 @@ enum URLScheme {
   SCHEME_OTHER,
 };
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Initialize the elapsed field
 inline void
@@ -903,7 +898,6 @@ update_results_elapsed(OriginStats *stat, int result, int elapsed, int size)
   }
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Update the "codes" stats for a particular record
 inline void
@@ -1051,7 +1045,6 @@ update_codes(OriginStats *stat, int code, int size)
     update_counter(stat->codes.c_2xx, size);
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Update the "methods" stats for a particular record
 inline void
@@ -1105,7 +1098,6 @@ update_methods(OriginStats *stat, int method, int size)
   }
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Update the "schemes" stats for a particular record
 inline void
@@ -1121,7 +1113,6 @@ update_schemes(OriginStats *stat, int scheme, int size)
     update_counter(stat->schemes.other, size);
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Parse a log buffer
 int
@@ -1296,7 +1287,8 @@ parse_log_buff(LogBufferHeader *buf_header, bool summary = false)
           if (ptr && !summary) { // Find the origin
             *ptr = '\0';
 
-            // TODO: If we save state (struct) for a run, we probably need to always
+            // TODO: If we save state (struct) for a run, we probably need to
+            // always
             // update the origin data, no matter what the origin_set is.
             if (origin_set->empty() || (origin_set->find(tok) != origin_set->end())) {
               o_iter = origins.find(tok);
@@ -1591,7 +1583,6 @@ parse_log_buff(LogBufferHeader *buf_header, bool summary = false)
   return 0;
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Process a file (FD)
 int
@@ -1655,8 +1646,9 @@ process_file(int in_fd, off_t offset, unsigned max_age)
     unsigned second_read_size = sizeof(LogBufferHeader) - first_read_size;
     nread = read(in_fd, &buffer[first_read_size], second_read_size);
     if (!nread || EOF == nread) {
-      Debug("logstats", "Second read of header failed (attemped %d bytes at offset %d, got nothing), errno=%d.", second_read_size,
-            first_read_size, errno);
+      Debug("logstats", "Second read of header failed (attemped %d bytes at "
+                        "offset %d, got nothing), errno=%d.",
+            second_read_size, first_read_size, errno);
       return 1;
     }
 
@@ -1674,12 +1666,15 @@ process_file(int in_fd, off_t offset, unsigned max_age)
 
     const int MAX_READ_TRIES = 5;
     int total_read = 0;
-    int read_tries_remaining = MAX_READ_TRIES; // since the data will be old anyway, let's only try a few times.
+    int read_tries_remaining = MAX_READ_TRIES; // since the data will be old
+                                               // anyway, let's only try a few
+                                               // times.
     do {
       nread = read(in_fd, &buffer[sizeof(LogBufferHeader) + total_read], buffer_bytes - total_read);
       if (EOF == nread || !nread) { // just bail on error
-        Debug("logstats", "Read failed while reading log buffer, wanted %d bytes, nread=%d, errno=%d", buffer_bytes - total_read,
-              nread, errno);
+        Debug("logstats", "Read failed while reading log buffer, wanted %d "
+                          "bytes, nread=%d, errno=%d",
+              buffer_bytes - total_read, nread, errno);
         return 1;
       } else {
         total_read += nread;
@@ -1692,8 +1687,9 @@ process_file(int in_fd, off_t offset, unsigned max_age)
           return 1;
         }
         // let's wait until we get more data on this file descriptor
-        Debug("logstats_partial_read",
-              "Failed to read buffer payload [%d bytes], total_read=%d, buffer_bytes=%d, tries_remaining=%d",
+        Debug("logstats_partial_read", "Failed to read buffer payload [%d "
+                                       "bytes], total_read=%d, "
+                                       "buffer_bytes=%d, tries_remaining=%d",
               buffer_bytes - total_read, total_read, buffer_bytes, read_tries_remaining);
         usleep(50 * 1000); // wait 50ms
       }
@@ -1713,7 +1709,6 @@ process_file(int in_fd, off_t offset, unsigned max_age)
   return 0;
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Determine if this "stat" (Origin Server) is worthwhile to produce a
 // report for.
@@ -1723,7 +1718,6 @@ use_origin(const OriginStats *stat)
   return ((stat->total.count > cl.min_hits) && (NULL != strchr(stat->server, '.')) && (NULL == strchr(stat->server, '%')));
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Produce a nicely formatted output for a stats collection on a stream
 inline void
@@ -1786,7 +1780,6 @@ format_elapsed_line(const char *desc, const ElapsedStats &stat, bool json = fals
   }
 }
 
-
 void
 format_detail_header(const char *desc)
 {
@@ -1829,7 +1822,6 @@ format_line(const char *desc, const StatsCounter &stat, const StatsCounter &tota
   }
 }
 
-
 // Little "helpers" for the vector we use to sort the Origins.
 typedef pair<const char *, OriginStats *> OriginPair;
 inline bool operator<(const OriginPair &a, const OriginPair &b)
@@ -2068,7 +2060,6 @@ print_detail_stats(const OriginStats *stat, bool json = false)
   }
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // Little wrapper around exit, to allow us to exit gracefully
 void
@@ -2140,7 +2131,8 @@ my_exit(const ExitStatus &status)
     }
   }
 
-  // Next the totals for all Origins, unless we specified a list of origins to filter.
+  // Next the totals for all Origins, unless we specified a list of origins to
+  // filter.
   if (origin_set->empty()) {
     first = false;
     if (cl.json) {
@@ -2213,7 +2205,6 @@ open_main_log(ExitStatus &status)
   return main_fd;
 }
 
-
 ///////////////////////////////////////////////////////////////////////////////
 // main
 int


[7/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheRead.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheRead.cc b/iocore/cache/CacheRead.cc
index e8ff804..4523c2a 100644
--- a/iocore/cache/CacheRead.cc
+++ b/iocore/cache/CacheRead.cc
@@ -48,14 +48,14 @@ Cache::open_read(Continuation *cont, const CacheKey *key, CacheFragType type, co
     CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
     if (!lock.is_locked() || (od = vol->open_read(key)) || dir_probe(key, vol, &result, &last_collision)) {
       c = new_CacheVC(cont);
-      SET_CONTINUATION_HANDLER(c, &CacheVC::openReadStartHead);
+      c->vol = vol;
+      c->first_key = c->key = c->earliest_key = *key;
       c->vio.op = VIO::READ;
       c->base_stat = cache_read_active_stat;
-      CACHE_INCREMENT_DYN_STAT(c->base_stat + CACHE_STAT_ACTIVE);
-      c->first_key = c->key = c->earliest_key = *key;
-      c->vol = vol;
-      c->frag_type = type;
       c->od = od;
+      c->frag_type = type;
+      CACHE_INCREMENT_DYN_STAT(c->base_stat + CACHE_STAT_ACTIVE);
+      SET_CONTINUATION_HANDLER(c, &CacheVC::openReadStartHead);
     }
     if (!c)
       goto Lmiss;
@@ -91,7 +91,46 @@ Lcallreturn:
   return &c->_action;
 }
 
-#ifdef HTTP_CACHE
+
+Action *
+Cache::open_read(Continuation *cont, CacheVConnection *vc, HTTPHdr *client_request_hdr)
+{
+  Action *zret = ACTION_RESULT_DONE;
+
+  CacheVC *write_vc = dynamic_cast<CacheVC *>(vc);
+  if (write_vc) {
+    Vol *vol = write_vc->vol;
+    ProxyMutex *mutex = cont->mutex; // needed for stat macros
+    CacheVC *c = new_CacheVC(cont);
+
+    c->vol = write_vc->vol;
+    c->first_key = write_vc->first_key;
+    // [amc] Need to fix this as it's pointless. In general @a earliest_key in the write VC
+    // won't be the correct value - it's randomly generated and for a partial fill won't be
+    // set to the actual alternate value until later (in @c set_http_info).
+    c->earliest_key = c->key = write_vc->earliest_key;
+    c->vio.op = VIO::READ;
+    c->base_stat = cache_read_active_stat;
+    c->od = write_vc->od;
+    c->frag_type = write_vc->frag_type;
+    CACHE_INCREMENT_DYN_STAT(c->base_stat + CACHE_STAT_ACTIVE);
+    //    write_vc->alternate.request_get(&c->request);
+    //    client_request_hdr->copy_shallow(&c->request);
+    c->request.copy_shallow(client_request_hdr);
+    c->params = write_vc->params; // seems to be a no-op, always NULL.
+    c->dir = c->first_dir = write_vc->first_dir;
+    c->write_vc = write_vc;
+    c->first_buf = write_vc->first_buf; // I don't think this is effective either.
+    SET_CONTINUATION_HANDLER(c, &CacheVC::openReadFromWriter);
+    zret = &c->_action; // default, override if needed.
+    CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
+    if (lock.is_locked() && c->handleEvent(EVENT_IMMEDIATE, 0) == EVENT_DONE) {
+      zret = ACTION_RESULT_DONE;
+    }
+  }
+  return zret;
+}
+
 Action *
 Cache::open_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request, CacheLookupHttpConfig *params, CacheFragType type,
                  const char *hostname, int host_len)
@@ -112,15 +151,15 @@ Cache::open_read(Continuation *cont, const CacheKey *key, CacheHTTPHdr *request,
     CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
     if (!lock.is_locked() || (od = vol->open_read(key)) || dir_probe(key, vol, &result, &last_collision)) {
       c = new_CacheVC(cont);
-      c->first_key = c->key = c->earliest_key = *key;
       c->vol = vol;
+      c->first_key = c->key = c->earliest_key = *key;
       c->vio.op = VIO::READ;
       c->base_stat = cache_read_active_stat;
+      c->od = od;
+      c->frag_type = CACHE_FRAG_TYPE_HTTP;
       CACHE_INCREMENT_DYN_STAT(c->base_stat + CACHE_STAT_ACTIVE);
       c->request.copy_shallow(request);
-      c->frag_type = CACHE_FRAG_TYPE_HTTP;
       c->params = params;
-      c->od = od;
     }
     if (!lock.is_locked()) {
       SET_CONTINUATION_HANDLER(c, &CacheVC::openReadStartHead);
@@ -160,27 +199,55 @@ Lcallreturn:
     return ACTION_RESULT_DONE;
   return &c->_action;
 }
-#endif
 
 uint32_t
 CacheVC::load_http_info(CacheHTTPInfoVector *info, Doc *doc, RefCountObj *block_ptr)
 {
   uint32_t zret = info->get_handles(doc->hdr(), doc->hlen, block_ptr);
-  if (cache_config_compatibility_4_2_0_fixup && // manual override not engaged
+  if (zret != static_cast<uint32_t>(-1) &&      // Make sure we haven't already failed
+      cache_config_compatibility_4_2_0_fixup && // manual override not engaged
       !this->f.doc_from_ram_cache &&            // it's already been done for ram cache fragments
       vol->header->version.ink_major == 23 && vol->header->version.ink_minor == 0) {
     for (int i = info->xcount - 1; i >= 0; --i) {
-      info->data(i).alternate.m_alt->m_response_hdr.m_mime->recompute_accelerators_and_presence_bits();
-      info->data(i).alternate.m_alt->m_request_hdr.m_mime->recompute_accelerators_and_presence_bits();
+      info->data(i)._alternate.m_alt->m_response_hdr.m_mime->recompute_accelerators_and_presence_bits();
+      info->data(i)._alternate.m_alt->m_request_hdr.m_mime->recompute_accelerators_and_presence_bits();
     }
   }
   return zret;
 }
 
+char const *
+CacheVC::get_http_range_boundary_string(int *len) const
+{
+  return resp_range.getBoundaryStr(len);
+}
+
+int64_t
+CacheVC::get_effective_content_size()
+{
+  return resp_range.hasRanges() ? resp_range.calcContentLength() : alternate.object_size_get();
+}
+
+int
+CacheVC::closeReadAndFree(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
+{
+  //  cancel_trigger(); // ??
+  if (od) {
+    CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
+    if (!lock.is_locked()) {
+      SET_HANDLER(&CacheVC::closeReadAndFree);
+      VC_SCHED_LOCK_RETRY();
+    }
+    vol->close_read(this);
+  }
+  return free_CacheVC(this);
+}
+
 int
 CacheVC::openReadFromWriterFailure(int event, Event *e)
 {
-  od = NULL;
+  // od = NULL;
+  vol->close_read(this);
   vector.clear(false);
   CACHE_INCREMENT_DYN_STAT(cache_read_failure_stat);
   CACHE_INCREMENT_DYN_STAT(cache_read_busy_failure_stat);
@@ -190,105 +257,6 @@ CacheVC::openReadFromWriterFailure(int event, Event *e)
 }
 
 int
-CacheVC::openReadChooseWriter(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
-{
-  intptr_t err = ECACHE_DOC_BUSY;
-  CacheVC *w = NULL;
-
-  ink_assert(vol->mutex->thread_holding == mutex->thread_holding && write_vc == NULL);
-
-  if (!od)
-    return EVENT_RETURN;
-
-  if (frag_type != CACHE_FRAG_TYPE_HTTP) {
-    ink_assert(od->num_writers == 1);
-    w = od->writers.head;
-    if (w->start_time > start_time || w->closed < 0) {
-      od = NULL;
-      return EVENT_RETURN;
-    }
-    if (!w->closed)
-      return -err;
-    write_vc = w;
-  }
-#ifdef HTTP_CACHE
-  else {
-    write_vector = &od->vector;
-    int write_vec_cnt = write_vector->count();
-    for (int c = 0; c < write_vec_cnt; c++)
-      vector.insert(write_vector->get(c));
-    // check if all the writers who came before this reader have
-    // set the http_info.
-    for (w = (CacheVC *)od->writers.head; w; w = (CacheVC *)w->opendir_link.next) {
-      if (w->start_time > start_time || w->closed < 0)
-        continue;
-      if (!w->closed && !cache_config_read_while_writer) {
-        return -err;
-      }
-      if (w->alternate_index != CACHE_ALT_INDEX_DEFAULT)
-        continue;
-
-      if (!w->closed && !w->alternate.valid()) {
-        od = NULL;
-        ink_assert(!write_vc);
-        vector.clear(false);
-        return EVENT_CONT;
-      }
-      // construct the vector from the writers.
-      int alt_ndx = CACHE_ALT_INDEX_DEFAULT;
-      if (w->f.update) {
-        // all Update cases. Need to get the alternate index.
-        alt_ndx = get_alternate_index(&vector, w->update_key);
-        // if its an alternate delete
-        if (!w->alternate.valid()) {
-          if (alt_ndx >= 0)
-            vector.remove(alt_ndx, false);
-          continue;
-        }
-      }
-      ink_assert(w->alternate.valid());
-      if (w->alternate.valid())
-        vector.insert(&w->alternate, alt_ndx);
-    }
-
-    if (!vector.count()) {
-      if (od->reading_vec) {
-        // the writer(s) are reading the vector, so there is probably
-        // an old vector. Since this reader came before any of the
-        // current writers, we should return the old data
-        od = NULL;
-        return EVENT_RETURN;
-      }
-      return -ECACHE_NO_DOC;
-    }
-    if (cache_config_select_alternate) {
-      alternate_index = HttpTransactCache::SelectFromAlternates(&vector, &request, params);
-      if (alternate_index < 0)
-        return -ECACHE_ALT_MISS;
-    } else
-      alternate_index = 0;
-    CacheHTTPInfo *obj = vector.get(alternate_index);
-    for (w = (CacheVC *)od->writers.head; w; w = (CacheVC *)w->opendir_link.next) {
-      if (obj->m_alt == w->alternate.m_alt) {
-        write_vc = w;
-        break;
-      }
-    }
-    vector.clear(false);
-    if (!write_vc) {
-      DDebug("cache_read_agg", "%p: key: %X writer alternate different: %d", this, first_key.slice32(1), alternate_index);
-      od = NULL;
-      return EVENT_RETURN;
-    }
-
-    DDebug("cache_read_agg", "%p: key: %X eKey: %d # alts: %d, ndx: %d, # writers: %d writer: %p", this, first_key.slice32(1),
-           write_vc->earliest_key.slice32(1), vector.count(), alternate_index, od->num_writers, write_vc);
-  }
-#endif // HTTP_CACHE
-  return EVENT_NONE;
-}
-
-int
 CacheVC::openReadFromWriter(int event, Event *e)
 {
   if (!f.read_from_writer_called) {
@@ -304,167 +272,70 @@ CacheVC::openReadFromWriter(int event, Event *e)
     f.read_from_writer_called = 1;
   }
   cancel_trigger();
-  intptr_t err = ECACHE_DOC_BUSY;
   DDebug("cache_read_agg", "%p: key: %X In openReadFromWriter", this, first_key.slice32(1));
-#ifndef READ_WHILE_WRITER
-  return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, (Event *)-err);
-#else
+
   if (_action.cancelled) {
-    od = NULL; // only open for read so no need to close
-    return free_CacheVC(this);
+    return this->closeReadAndFree(0, NULL);
+    //    od = NULL; // only open for read so no need to close
+    //    return free_CacheVC(this);
   }
   CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
   if (!lock.is_locked())
     VC_SCHED_LOCK_RETRY();
-  od = vol->open_read(&first_key); // recheck in case the lock failed
-  if (!od) {
+  if (!od && NULL == (od = vol->open_read(&first_key))) {
     MUTEX_RELEASE(lock);
     write_vc = NULL;
     SET_HANDLER(&CacheVC::openReadStartHead);
     return openReadStartHead(event, e);
-  } else
-    ink_assert(od == vol->open_read(&first_key));
-  if (!write_vc) {
-    int ret = openReadChooseWriter(event, e);
-    if (ret < 0) {
-      MUTEX_RELEASE(lock);
-      SET_HANDLER(&CacheVC::openReadFromWriterFailure);
-      return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, reinterpret_cast<Event *>(ret));
-    } else if (ret == EVENT_RETURN) {
-      MUTEX_RELEASE(lock);
-      SET_HANDLER(&CacheVC::openReadStartHead);
-      return openReadStartHead(event, e);
-    } else if (ret == EVENT_CONT) {
-      ink_assert(!write_vc);
-      VC_SCHED_WRITER_RETRY();
-    } else
-      ink_assert(write_vc);
-  } else {
-    if (writer_done()) {
-      MUTEX_RELEASE(lock);
-      DDebug("cache_read_agg", "%p: key: %X writer %p has left, continuing as normal read", this, first_key.slice32(1), write_vc);
-      od = NULL;
-      write_vc = NULL;
-      SET_HANDLER(&CacheVC::openReadStartHead);
-      return openReadStartHead(event, e);
-    }
-  }
-#ifdef HTTP_CACHE
-  OpenDirEntry *cod = od;
-#endif
-  od = NULL;
-  // someone is currently writing the document
-  if (write_vc->closed < 0) {
-    MUTEX_RELEASE(lock);
-    write_vc = NULL;
-    // writer aborted, continue as if there is no writer
-    SET_HANDLER(&CacheVC::openReadStartHead);
-    return openReadStartHead(EVENT_IMMEDIATE, 0);
-  }
-  // allow reading from unclosed writer for http requests only.
-  ink_assert(frag_type == CACHE_FRAG_TYPE_HTTP || write_vc->closed);
-  if (!write_vc->closed && !write_vc->fragment) {
-    if (!cache_config_read_while_writer || frag_type != CACHE_FRAG_TYPE_HTTP ||
-        writer_lock_retry >= cache_config_read_while_writer_max_retries) {
-      MUTEX_RELEASE(lock);
-      return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, (Event *)-err);
-    }
-    DDebug("cache_read_agg", "%p: key: %X writer: closed:%d, fragment:%d, retry: %d", this, first_key.slice32(1), write_vc->closed,
-           write_vc->fragment, writer_lock_retry);
-    VC_SCHED_WRITER_RETRY();
   }
 
-  CACHE_TRY_LOCK(writer_lock, write_vc->mutex, mutex->thread_holding);
-  if (!writer_lock.is_locked()) {
-    DDebug("cache_read_agg", "%p: key: %X lock miss", this, first_key.slice32(1));
+  CACHE_TRY_LOCK(lock_od, od->mutex, mutex->thread_holding);
+  if (!lock_od.is_locked())
     VC_SCHED_LOCK_RETRY();
-  }
-  MUTEX_RELEASE(lock);
 
-  if (!write_vc->io.ok())
-    return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, (Event *)-err);
-#ifdef HTTP_CACHE
-  if (frag_type == CACHE_FRAG_TYPE_HTTP) {
-    DDebug("cache_read_agg", "%p: key: %X http passed stage 1, closed: %d, frag: %d", this, first_key.slice32(1), write_vc->closed,
-           write_vc->fragment);
-    if (!write_vc->alternate.valid())
-      return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, (Event *)-err);
-    alternate.copy(&write_vc->alternate);
-    vector.insert(&alternate);
-    alternate.object_key_get(&key);
-    write_vc->f.readers = 1;
-    if (!(write_vc->f.update && write_vc->total_len == 0)) {
-      key = write_vc->earliest_key;
-      if (!write_vc->closed)
-        alternate.object_size_set(write_vc->vio.nbytes);
-      else
-        alternate.object_size_set(write_vc->total_len);
-    } else {
-      key = write_vc->update_key;
-      ink_assert(write_vc->closed);
-      DDebug("cache_read_agg", "%p: key: %X writer header update", this, first_key.slice32(1));
-      // Update case (b) : grab doc_len from the writer's alternate
-      doc_len = alternate.object_size_get();
-      if (write_vc->update_key == cod->single_doc_key && (cod->move_resident_alt || write_vc->f.rewrite_resident_alt) &&
-          write_vc->first_buf._ptr()) {
-        // the resident alternate is being updated and its a
-        // header only update. The first_buf of the writer has the
-        // document body.
-        Doc *doc = (Doc *)write_vc->first_buf->data();
-        writer_buf = new_IOBufferBlock(write_vc->first_buf, doc->data_len(), doc->prefix_len());
-        MUTEX_RELEASE(writer_lock);
-        ink_assert(doc_len == doc->data_len());
-        length = doc_len;
-        f.single_fragment = 1;
-        doc_pos = 0;
-        earliest_key = key;
-        dir_clean(&first_dir);
-        dir_clean(&earliest_dir);
-        SET_HANDLER(&CacheVC::openReadFromWriterMain);
-        CACHE_INCREMENT_DYN_STAT(cache_read_busy_success_stat);
-        return callcont(CACHE_EVENT_OPEN_READ);
-      }
-      // want to snarf the new headers from the writer
-      // and then continue as if nothing happened
-      last_collision = NULL;
-      MUTEX_RELEASE(writer_lock);
-      SET_HANDLER(&CacheVC::openReadStartEarliest);
-      return openReadStartEarliest(event, e);
+  if (od->open_writer) {
+    // Alternates are in flux, wait for origin server response to update them.
+    if (!od->open_waiting.in(this)) {
+      wake_up_thread = mutex->thread_holding;
+      od->open_waiting.push(this);
     }
-  } else {
-#endif // HTTP_CACHE
-    DDebug("cache_read_agg", "%p: key: %X non-http passed stage 1", this, first_key.slice32(1));
-    key = write_vc->earliest_key;
-#ifdef HTTP_CACHE
+    Debug("amc", "[CacheVC::openReadFromWriter] waiting for %p", od->open_writer);
+    return EVENT_CONT; // wait for the writer to wake us up.
   }
-#endif
-  if (write_vc->fragment) {
-    doc_len = write_vc->vio.nbytes;
-    last_collision = NULL;
-    DDebug("cache_read_agg", "%p: key: %X closed: %d, fragment: %d, len: %d starting first fragment", this, first_key.slice32(1),
-           write_vc->closed, write_vc->fragment, (int)doc_len);
-    MUTEX_RELEASE(writer_lock);
-    // either a header + body update or a new document
+
+  MUTEX_RELEASE(lock); // we have the OD lock now, don't need the vol lock.
+
+  if (write_vc && CACHE_ALT_INDEX_DEFAULT != (alternate_index = get_alternate_index(&(od->vector), write_vc->earliest_key))) {
+    // Found the alternate for our write VC. Really, though, if we have a write_vc we should never fail to get
+    // the alternate - we should probably check for that.
+    alternate.copy_shallow(od->vector.get(alternate_index));
+    key = earliest_key = alternate.object_key_get();
+    doc_len = alternate.object_size_get();
+    Debug("amc", "[openReadFromWriter] - setting alternate from write_vc %p to #%d : %p", write_vc, alternate_index,
+          alternate.m_alt);
+    MUTEX_RELEASE(lock_od);
     SET_HANDLER(&CacheVC::openReadStartEarliest);
     return openReadStartEarliest(event, e);
+  } else {
+    if (cache_config_select_alternate) {
+      alternate_index = HttpTransactCache::SelectFromAlternates(&od->vector, &request, params);
+      if (alternate_index < 0) {
+        MUTEX_RELEASE(lock_od);
+        SET_HANDLER(&CacheVC::openReadFromWriterFailure);
+        return openReadFromWriterFailure(CACHE_EVENT_OPEN_READ_FAILED, reinterpret_cast<Event *>(-ECACHE_ALT_MISS));
+      }
+      Debug("amc", "[openReadFromWriter] select alt: %d %p (current %p)", alternate_index, od->vector.get(alternate_index)->m_alt,
+            alternate.m_alt);
+      write_vector = &od->vector;
+    } else {
+      alternate_index = 0;
+    }
+    MUTEX_RELEASE(lock_od);
+    SET_HANDLER(&CacheVC::openReadStartHead);
+    return openReadStartHead(event, e);
   }
-  writer_buf = write_vc->blocks;
-  writer_offset = write_vc->offset;
-  length = write_vc->length;
-  // copy the vector
-  f.single_fragment = !write_vc->fragment; // single fragment doc
-  doc_pos = 0;
-  earliest_key = write_vc->earliest_key;
-  ink_assert(earliest_key == key);
-  doc_len = write_vc->total_len;
-  dir_clean(&first_dir);
-  dir_clean(&earliest_dir);
-  DDebug("cache_read_agg", "%p: key: %X %X: single fragment read", this, first_key.slice32(1), key.slice32(0));
-  MUTEX_RELEASE(writer_lock);
-  SET_HANDLER(&CacheVC::openReadFromWriterMain);
-  CACHE_INCREMENT_DYN_STAT(cache_read_busy_success_stat);
-  return callcont(CACHE_EVENT_OPEN_READ);
-#endif // READ_WHILE_WRITER
+  ink_assert(false);
+  return EVENT_DONE; // should not get here.
 }
 
 int
@@ -575,6 +446,8 @@ CacheVC::openReadReadDone(int event, Event *e)
         goto Lcallreturn;
       return EVENT_CONT;
     } else if (write_vc) {
+      ink_release_assert(!"[amc] Handle this");
+#if 0
       if (writer_done()) {
         last_collision = NULL;
         while (dir_probe(&earliest_key, vol, &dir, &last_collision)) {
@@ -589,6 +462,7 @@ CacheVC::openReadReadDone(int event, Event *e)
       }
       DDebug("cache_read_agg", "%p: key: %X ReadRead retrying: %d", this, first_key.slice32(1), (int)vio.ndone);
       VC_SCHED_WRITER_RETRY(); // wait for writer
+#endif
     }
     // fall through for truncated documents
   }
@@ -596,126 +470,144 @@ Lerror:
   char tmpstring[100];
   Warning("Document %s truncated", earliest_key.toHexStr(tmpstring));
   return calluser(VC_EVENT_ERROR);
-Ldone:
+  // Ldone:
   return calluser(VC_EVENT_EOS);
 Lcallreturn:
   return handleEvent(AIO_EVENT_DONE, 0);
 LreadMain:
-  fragment++;
+  ++fragment;
   doc_pos = doc->prefix_len();
+  doc_pos += resp_range.getOffset() - frag_upper_bound; // used before update!
+  frag_upper_bound += doc->data_len();
   next_CacheKey(&key, &key);
   SET_HANDLER(&CacheVC::openReadMain);
   return openReadMain(event, e);
 }
 
+void
+CacheVC::update_key_to_frag_idx(int target)
+{
+  if (0 == target) {
+    fragment = 0;
+    key = earliest_key;
+  } else {
+    FragmentDescriptor *frag = alternate.force_frag_at(target);
+    ink_assert(frag);
+    key = frag->m_key;
+  }
+}
+
+int
+CacheVC::frag_idx_for_offset(uint64_t offset)
+{
+  FragmentDescriptorTable *frags = alternate.get_frag_table();
+  int count = alternate.get_frag_count();
+  uint32_t ffs = alternate.get_frag_fixed_size();
+  int idx = count / 2;
+
+  ink_assert(offset < doc_len);
+
+  if (ffs)
+    idx = offset / ffs; // good guess as to the right offset.
+
+  if (count > 1 && 0 == (*frags)[1].m_offset)
+    ++idx;
+
+  do {
+    uint64_t upper = idx >= count ? doc_len : (*frags)[idx + 1].m_offset;
+    uint64_t lower = idx <= 0 ? 0 : (*frags)[idx].m_offset;
+    if (offset < lower)
+      idx = idx / 2;
+    else if (offset >= upper)
+      idx = (count + idx + 1) / 2;
+    else
+      break;
+  } while (true);
+  return idx;
+}
+
+/* There is a fragment available, decide what to do next.
+ */
 int
 CacheVC::openReadMain(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
   cancel_trigger();
   Doc *doc = (Doc *)buf->data();
-  int64_t ntodo = vio.ntodo();
-  int64_t bytes = doc->len - doc_pos;
+  int64_t bytes = vio.ntodo();
   IOBufferBlock *b = NULL;
-  if (seek_to) { // handle do_io_pread
-    if (seek_to >= doc_len) {
-      vio.ndone = doc_len;
-      return calluser(VC_EVENT_EOS);
+  uint64_t target_offset = resp_range.getOffset();
+  uint64_t lower_bound = frag_upper_bound - doc->data_len();
+
+  if (bytes <= 0)
+    return EVENT_CONT;
+
+  // Start shipping
+  while (bytes > 0 && lower_bound <= target_offset && target_offset < frag_upper_bound) {
+    if (vio.buffer.writer()->max_read_avail() > vio.buffer.writer()->water_mark && vio.ndone) // wait for reader
+      return EVENT_CONT;
+
+    if (resp_range.hasPendingRangeShift()) { // in new range, shift to start location.
+      int b_len;
+      char const *b_str = resp_range.getBoundaryStr(&b_len);
+      size_t r_idx = resp_range.getIdx();
+
+      doc_pos = doc->prefix_len() + (target_offset - lower_bound);
+
+      vio.ndone +=
+        HTTPRangeSpec::writePartBoundary(vio.buffer.writer(), b_str, b_len, doc_len, resp_range[r_idx]._min, resp_range[r_idx]._max,
+                                         resp_range.getContentTypeField(), r_idx >= (resp_range.count() - 1));
+      resp_range.consumeRangeShift();
+      Debug("amc", "Range boundary for range %" PRIu64, r_idx);
     }
-#ifdef HTTP_CACHE
-    HTTPInfo::FragOffset *frags = alternate.get_frag_table();
-    if (is_debug_tag_set("cache_seek")) {
-      char b[33], c[33];
-      Debug("cache_seek", "Seek @ %" PRId64 " in %s from #%d @ %" PRId64 "/%d:%s", seek_to, first_key.toHexStr(b), fragment,
-            doc_pos, doc->len, doc->key.toHexStr(c));
+
+    bytes = std::min(doc->len - doc_pos, static_cast<int64_t>(resp_range.getRemnantSize()));
+    bytes = std::min(bytes, vio.ntodo());
+    if (bytes > 0) {
+      b = new_IOBufferBlock(buf, bytes, doc_pos);
+      b->_buf_end = b->_end;
+      vio.buffer.writer()->append_block(b);
+      vio.ndone += bytes;
+      doc_pos += bytes;
+      resp_range.consume(bytes);
+      Debug("amc", "shipped %" PRId64 " bytes at target offset %" PRIu64, bytes, target_offset);
+      target_offset = resp_range.getOffset();
     }
-    /* Because single fragment objects can migrate to hang off an alt vector
-       they can appear to the VC as multi-fragment when they are not really.
-       The essential difference is the existence of a fragment table.
-    */
-    if (frags) {
-      int target = 0;
-      HTTPInfo::FragOffset next_off = frags[target];
-      int lfi = static_cast<int>(alternate.get_frag_offset_count()) - 1;
-      ink_assert(lfi >= 0); // because it's not a single frag doc.
-
-      /* Note: frag[i].offset is the offset of the first byte past the
-         i'th fragment. So frag[0].offset is the offset of the first
-         byte of fragment 1. In addition the # of fragments is one
-         more than the fragment table length, the start of the last
-         fragment being the last offset in the table.
-      */
-      if (fragment == 0 || seek_to < frags[fragment - 1] || (fragment <= lfi && frags[fragment] <= seek_to)) {
-        // search from frag 0 on to find the proper frag
-        while (seek_to >= next_off && target < lfi) {
-          next_off = frags[++target];
-        }
-        if (target == lfi && seek_to >= next_off)
-          ++target;
-      } else { // shortcut if we are in the fragment already
-        target = fragment;
-      }
-      if (target != fragment) {
-        // Lread will read the next fragment always, so if that
-        // is the one we want, we don't need to do anything
-        int cfi = fragment;
-        --target;
-        while (target > fragment) {
-          next_CacheKey(&key, &key);
-          ++fragment;
-        }
-        while (target < fragment) {
-          prev_CacheKey(&key, &key);
-          --fragment;
-        }
 
-        if (is_debug_tag_set("cache_seek")) {
-          char target_key_str[33];
-          key.toHexStr(target_key_str);
-          Debug("cache_seek", "Seek #%d @ %" PRId64 " -> #%d @ %" PRId64 ":%s", cfi, doc_pos, target, seek_to, target_key_str);
-        }
-        goto Lread;
+    if (vio.ntodo() <= 0)
+      return calluser(VC_EVENT_READ_COMPLETE);
+    else if (calluser(VC_EVENT_READ_READY) == EVENT_DONE)
+      return EVENT_DONE;
+  }
+
+
+#ifdef HTTP_CACHE
+  if (resp_range.getRemnantSize()) {
+    FragmentDescriptorTable *frags = alternate.get_frag_table();
+    int n_frags = alternate.get_frag_count();
+
+    // Quick check for offset in next fragment - very common
+    if (target_offset >= frag_upper_bound && (!frags || fragment >= n_frags || target_offset <= (*frags)[fragment].m_offset)) {
+      Debug("amc", "Non-seeking continuation to next fragment");
+    } else {
+      int target = -1; // target fragment index.
+
+      if (is_debug_tag_set("amc")) {
+        char b[33], c[33];
+        Debug("amc", "Seek @ %" PRIu64 " [r#=%d] in %s from #%d @ %" PRIu64 "/%d/%" PRId64 ":%s%s", target_offset,
+              resp_range.getIdx(), first_key.toHexStr(b), fragment, frag_upper_bound, doc->len, doc->total_len,
+              doc->key.toHexStr(c), (frags ? "" : "no frag table"));
       }
+
+      target = this->frag_idx_for_offset(target_offset);
+      this->update_key_to_frag_idx(target);
+      /// one frag short, because it gets bumped when the fragment is actually read.
+      frag_upper_bound = target > 0 ? (*frags)[target].m_offset : 0;
+      Debug("amc", "Fragment seek from %d to %d target offset %" PRIu64, fragment - 1, target, target_offset);
     }
-    doc_pos = doc->prefix_len() + seek_to;
-    if (fragment)
-      doc_pos -= static_cast<int64_t>(frags[fragment - 1]);
-    vio.ndone = 0;
-    seek_to = 0;
-    ntodo = vio.ntodo();
-    bytes = doc->len - doc_pos;
-    if (is_debug_tag_set("cache_seek")) {
-      char target_key_str[33];
-      key.toHexStr(target_key_str);
-      Debug("cache_seek", "Read # %d @ %" PRId64 "/%d for %" PRId64, fragment, doc_pos, doc->len, bytes);
-    }
-#endif
   }
-  if (ntodo <= 0)
-    return EVENT_CONT;
-  if (vio.buffer.writer()->max_read_avail() > vio.buffer.writer()->water_mark && vio.ndone) // initiate read of first block
-    return EVENT_CONT;
-  if ((bytes <= 0) && vio.ntodo() >= 0)
-    goto Lread;
-  if (bytes > vio.ntodo())
-    bytes = vio.ntodo();
-  b = new_IOBufferBlock(buf, bytes, doc_pos);
-  b->_buf_end = b->_end;
-  vio.buffer.writer()->append_block(b);
-  vio.ndone += bytes;
-  doc_pos += bytes;
-  if (vio.ntodo() <= 0)
-    return calluser(VC_EVENT_READ_COMPLETE);
-  else {
-    if (calluser(VC_EVENT_READ_READY) == EVENT_DONE)
-      return EVENT_DONE;
-    // we have to keep reading until we give the user all the
-    // bytes it wanted or we hit the watermark.
-    if (vio.ntodo() > 0 && !vio.buffer.writer()->high_water())
-      goto Lread;
-    return EVENT_CONT;
-  }
-Lread : {
-  if (vio.ndone >= (int64_t)doc_len)
+#endif
+
+  if (vio.ntodo() > 0 && 0 == resp_range.getRemnantSize())
     // reached the end of the document and the user still wants more
     return calluser(VC_EVENT_EOS);
   last_collision = 0;
@@ -733,25 +625,20 @@ Lread : {
   if (dir_probe(&key, vol, &dir, &last_collision)) {
     SET_HANDLER(&CacheVC::openReadReadDone);
     int ret = do_read_call(&key);
-    if (ret == EVENT_RETURN)
-      goto Lcallreturn;
+    if (ret == EVENT_RETURN) {
+      lock.release();
+      return handleEvent(AIO_EVENT_DONE, 0);
+    }
     return EVENT_CONT;
-  } else if (write_vc) {
-    if (writer_done()) {
-      last_collision = NULL;
-      while (dir_probe(&earliest_key, vol, &dir, &last_collision)) {
-        if (dir_offset(&dir) == dir_offset(&earliest_dir)) {
-          DDebug("cache_read_agg", "%p: key: %X ReadMain complete: %d", this, first_key.slice32(1), (int)vio.ndone);
-          doc_len = vio.ndone;
-          goto Leos;
-        }
-      }
+  } else {
+    if (!od->wait_for(earliest_key, this, target_offset)) {
       DDebug("cache_read_agg", "%p: key: %X ReadMain writer aborted: %d", this, first_key.slice32(1), (int)vio.ndone);
-      goto Lerror;
+      lock.release();
+      return calluser(VC_EVENT_ERROR);
     }
-    DDebug("cache_read_agg", "%p: key: %X ReadMain retrying: %d", this, first_key.slice32(1), (int)vio.ndone);
+    DDebug("cache_read_agg", "%p: key: %X ReadMain waiting: %d", this, first_key.slice32(1), (int)vio.ndone);
     SET_HANDLER(&CacheVC::openReadMain);
-    VC_SCHED_WRITER_RETRY();
+    return EVENT_CONT;
   }
   if (is_action_tag_set("cache"))
     ink_release_assert(false);
@@ -759,15 +646,45 @@ Lread : {
           key.slice32(1));
   // remove the directory entry
   dir_delete(&earliest_key, vol, &earliest_dir);
-}
-Lerror:
+  lock.release();
+  // Lerror:
   return calluser(VC_EVENT_ERROR);
-Leos:
+  // Leos:
   return calluser(VC_EVENT_EOS);
-Lcallreturn:
-  return handleEvent(AIO_EVENT_DONE, 0);
 }
 
+int
+CacheVC::openReadWaitEarliest(int evid, Event *)
+{
+  int zret = EVENT_CONT;
+  cancel_trigger();
+
+  CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
+  if (!lock.is_locked())
+    VC_SCHED_LOCK_RETRY();
+  Debug("amc", "[CacheVC::openReadWaitEarliest] [%d]", evid);
+  if (NULL == vol->open_read(&first_key)) {
+    // Writer is gone, so no more data for which to wait.
+    // Best option is to just start over from the first frag.
+    // Most likely scenario - object turned out to be a resident alternate so
+    // there's no explicit earliest frag.
+    lock.release();
+    SET_HANDLER(&self::openReadStartHead);
+    //    od = NULL;
+    key = first_key;
+    return handleEvent(EVENT_IMMEDIATE, 0);
+  } else if (dir_probe(&key, vol, &earliest_dir, &last_collision) || dir_lookaside_probe(&key, vol, &earliest_dir, NULL)) {
+    dir = earliest_dir;
+    SET_HANDLER(&self::openReadStartEarliest);
+    if ((zret = do_read_call(&key)) == EVENT_RETURN) {
+      lock.release();
+      return handleEvent(AIO_EVENT_DONE, 0);
+    }
+  }
+  return zret;
+}
+
+
 /*
   This code follows CacheVC::openReadStartHead closely,
   if you change this you might have to change that.
@@ -822,6 +739,8 @@ CacheVC::openReadStartEarliest(int /* event ATS_UNUSED */, Event * /* e ATS_UNUS
     earliest_key = key;
     doc_pos = doc->prefix_len();
     next_CacheKey(&key, &doc->key);
+    fragment = 1;
+    frag_upper_bound = doc->data_len();
     vol->begin_read(this);
     if (vol->within_hit_evacuate_window(&earliest_dir) &&
         (!cache_config_hit_evacuate_size_limit || doc_len <= (uint64_t)cache_config_hit_evacuate_size_limit)) {
@@ -840,9 +759,22 @@ CacheVC::openReadStartEarliest(int /* event ATS_UNUSED */, Event * /* e ATS_UNUS
 // read has detected that alternate does not exist in the cache.
 // rewrite the vector.
 #ifdef HTTP_CACHE
-    if (!f.read_from_writer_called && frag_type == CACHE_FRAG_TYPE_HTTP) {
+    // It's OK if there's a writer for this alternate, we can wait on it.
+    if (od && od->has_writer(earliest_key)) {
+      wake_up_thread = mutex->thread_holding;
+      od->wait_for(earliest_key, this, 0);
+      lock.release();
+      // The SM must be signaled that the cache read is open even if we haven't got the earliest frag
+      // yet because otherwise it won't set up the read side of the tunnel before the write side finishes
+      // and terminates the SM (in the case of a resident alternate). But the VC can't be left with this
+      // handler or it will confuse itself when it wakes up from the earliest frag read. So we put it
+      // in a special wait state / handler and then signal the SM.
+      SET_HANDLER(&self::openReadWaitEarliest);
+      return callcont(CACHE_EVENT_OPEN_READ); // must signal read is open
+    } else if (frag_type == CACHE_FRAG_TYPE_HTTP) {
       // don't want any writers while we are evacuating the vector
-      if (!vol->open_write(this, false, 1)) {
+      ink_release_assert(!"[amc] Not handling multiple writers with vector evacuate");
+      if (!vol->open_write(this)) {
         Doc *doc1 = (Doc *)first_buf->data();
         uint32_t len = this->load_http_info(write_vector, doc1);
         ink_assert(len == doc1->hlen && write_vector->count() > 0);
@@ -939,6 +871,8 @@ CacheVC::openReadVecWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */
       if (od->move_resident_alt)
         dir_insert(&od->single_doc_key, vol, &od->single_doc_dir);
       int alt_ndx = HttpTransactCache::SelectFromAlternates(write_vector, &request, params);
+      Debug("amc", "[openReadVecWrite] select alt: %d %p (current %p)", alt_ndx, write_vector->get(alt_ndx)->m_alt,
+            alternate.m_alt);
       vol->close_write(this);
       if (alt_ndx >= 0) {
         vector.clear();
@@ -963,6 +897,10 @@ Lrestart:
 /*
   This code follows CacheVC::openReadStartEarliest closely,
   if you change this you might have to change that.
+
+  This handles the I/O completion of reading the first doc of the object.
+  If there are alternates, we chain to openReadStartEarliest to read the
+  earliest doc.
 */
 int
 CacheVC::openReadStartHead(int event, Event *e)
@@ -1044,13 +982,18 @@ CacheVC::openReadStartHead(int event, Event *e)
         err = ECACHE_BAD_META_DATA;
         goto Ldone;
       }
-      if (cache_config_select_alternate) {
+      // If @a params is @c NULL then we're a retry from a range request pair so don't do alt select.
+      // Instead try the @a earliest_key - if that's a match then that's the correct alt, written
+      // by the paired write VC.
+      if (cache_config_select_alternate && params) {
         alternate_index = HttpTransactCache::SelectFromAlternates(&vector, &request, params);
         if (alternate_index < 0) {
           err = ECACHE_ALT_MISS;
           goto Ldone;
         }
-      } else
+        Debug("amc", "[openReadStartHead] select alt: %d %p (current %p, od %p)", alternate_index,
+              vector.get(alternate_index)->m_alt, alternate.m_alt, od);
+      } else if (CACHE_ALT_INDEX_DEFAULT == (alternate_index = get_alternate_index(&vector, earliest_key)))
         alternate_index = 0;
       alternate_tmp = vector.get(alternate_index);
       if (!alternate_tmp->valid()) {
@@ -1064,12 +1007,24 @@ CacheVC::openReadStartHead(int event, Event *e)
       alternate.copy_shallow(alternate_tmp);
       alternate.object_key_get(&key);
       doc_len = alternate.object_size_get();
+
+      // If the object length is known we can check the range.
+      // Otherwise we have to leave it vague and talk to the origin to get full length info.
+      if (alternate.m_alt->m_flag.content_length_p && !resp_range.apply(doc_len)) {
+        err = ECACHE_UNSATISFIABLE_RANGE;
+        goto Ldone;
+      }
+      if (resp_range.isMulti())
+        resp_range.setContentTypeFromResponse(alternate.response_get()).generateBoundaryStr(earliest_key);
+
       if (key == doc->key) { // is this my data?
         f.single_fragment = doc->single_fragment();
         ink_assert(f.single_fragment); // otherwise need to read earliest
         ink_assert(doc->hlen);
         doc_pos = doc->prefix_len();
         next_CacheKey(&key, &doc->key);
+        fragment = 1;
+        frag_upper_bound = doc->data_len();
       } else {
         f.single_fragment = false;
       }
@@ -1077,6 +1032,8 @@ CacheVC::openReadStartHead(int event, Event *e)
 #endif
     {
       next_CacheKey(&key, &doc->key);
+      fragment = 1;
+      frag_upper_bound = doc->data_len();
       f.single_fragment = doc->single_fragment();
       doc_pos = doc->prefix_len();
       doc_len = doc->total_len;
@@ -1087,7 +1044,7 @@ CacheVC::openReadStartHead(int event, Event *e)
       Debug("cache_read", "CacheReadStartHead - read %s target %s - %s %d of %" PRId64 " bytes, %d fragments",
             doc->key.toHexStr(xt), key.toHexStr(yt), f.single_fragment ? "single" : "multi", doc->len, doc->total_len,
 #ifdef HTTP_CACHE
-            alternate.get_frag_offset_count()
+            alternate.get_frag_count()
 #else
             0
 #endif

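An aside on CacheVC::frag_idx_for_offset above: it maps an absolute byte offset to the index of the fragment that contains it, using the fixed fragment size as an initial guess and the fragment offset table to converge. The standalone sketch below shows the same mapping done with a plain binary search; the std::vector offset table and the main() driver are simplified stand-ins, not the patch's FragmentDescriptorTable API.

    // Minimal sketch (assumptions: frag_start[i] is the absolute offset of the
    // first byte of fragment i, frag_start[0] == 0, offsets strictly increase,
    // and offset < doc_len). Not the patch's FragmentDescriptorTable interface.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static int
    frag_index_for_offset(const std::vector<uint64_t> &frag_start, uint64_t doc_len, uint64_t offset)
    {
      assert(!frag_start.empty() && frag_start[0] == 0 && offset < doc_len);
      // upper_bound finds the first fragment that starts past 'offset';
      // the fragment containing 'offset' is the one just before it.
      auto it = std::upper_bound(frag_start.begin(), frag_start.end(), offset);
      return static_cast<int>((it - frag_start.begin()) - 1);
    }

    int
    main()
    {
      std::vector<uint64_t> frag_start = {0, 1048576, 2097152, 3145728}; // 1 MiB fragments
      uint64_t doc_len = 3500000;
      std::printf("offset 0       -> frag %d\n", frag_index_for_offset(frag_start, doc_len, 0));
      std::printf("offset 1500000 -> frag %d\n", frag_index_for_offset(frag_start, doc_len, 1500000));
      std::printf("offset 3400000 -> frag %d\n", frag_index_for_offset(frag_start, doc_len, 3400000));
      return 0;
    }

The version in the patch seeds the search with offset / fragment-fixed-size, which lands on the right slot immediately when fragments are uniformly sized.
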
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheTest.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheTest.cc b/iocore/cache/CacheTest.cc
index fb55123..1921060 100644
--- a/iocore/cache/CacheTest.cc
+++ b/iocore/cache/CacheTest.cc
@@ -387,8 +387,8 @@ EXCLUSIVE_REGRESSION_TEST(cache)(RegressionTest *t, int /* atype ATS_UNUSED */,
 
   r_sequential(t, write_test.clone(), lookup_test.clone(), r_sequential(t, 10, read_test.clone()), remove_test.clone(),
                lookup_fail_test.clone(), read_fail_test.clone(), remove_fail_test.clone(), replace_write_test.clone(),
-               replace_test.clone(), replace_read_test.clone(), large_write_test.clone(), pread_test.clone(), NULL_PTR)
-    ->run(pstatus);
+               replace_test.clone(), replace_read_test.clone(), large_write_test.clone(), pread_test.clone(),
+               NULL_PTR)->run(pstatus);
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheVol.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheVol.cc b/iocore/cache/CacheVol.cc
index f9047f3..d5d85bc 100644
--- a/iocore/cache/CacheVol.cc
+++ b/iocore/cache/CacheVol.cc
@@ -413,8 +413,9 @@ CacheVC::scanOpenWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
     }
 
     Debug("cache_scan", "trying for writer lock");
-    if (vol->open_write(this, false, 1)) {
-      writer_lock_retry++;
+    if (vol->open_write(this)) {
+      // [amc] This tried to restrict to one writer, must fix at some point.
+      ++writer_lock_retry;
       SET_HANDLER(&CacheVC::scanOpenWrite);
       mutex->thread_holding->schedule_in_local(this, scan_msec_delay);
       return EVENT_CONT;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheWrite.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheWrite.cc b/iocore/cache/CacheWrite.cc
index 3740d21..1fbb16a 100644
--- a/iocore/cache/CacheWrite.cc
+++ b/iocore/cache/CacheWrite.cc
@@ -33,13 +33,19 @@
 // used to get the alternate which is actually present in the document
 #ifdef HTTP_CACHE
 int
-get_alternate_index(CacheHTTPInfoVector *cache_vector, CacheKey key)
+get_alternate_index(CacheHTTPInfoVector *cache_vector, CacheKey key, int idx)
 {
   int alt_count = cache_vector->count();
   CacheHTTPInfo *obj;
   if (!alt_count)
     return -1;
+  // See if the hint is correct.
+  if (0 <= idx && idx < alt_count && cache_vector->get(idx)->compare_object_key(&key))
+    return idx;
+  // Otherwise scan the vector.
   for (int i = 0; i < alt_count; i++) {
+    if (i == idx)
+      continue; // already checked that one.
     obj = cache_vector->get(i);
     if (obj->compare_object_key(&key)) {
       // Debug("cache_key", "Resident alternate key  %X", key.slice32(0));
@@ -63,20 +69,23 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
     VC_SCHED_LOCK_RETRY();
   int ret = 0;
   {
-    CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
+    CACHE_TRY_LOCK(lock, od->mutex, mutex->thread_holding);
     if (!lock.is_locked() || od->writing_vec)
       VC_SCHED_LOCK_RETRY();
 
     int vec = alternate.valid();
     if (f.update) {
       // all Update cases. Need to get the alternate index.
-      alternate_index = get_alternate_index(write_vector, update_key);
+      alternate_index = get_alternate_index(write_vector, update_key, alternate_index);
       Debug("cache_update", "updating alternate index %d frags %d", alternate_index,
-            alternate_index >= 0 ? write_vector->get(alternate_index)->get_frag_offset_count() : -1);
+            alternate_index >= 0 ? write_vector->get(alternate_index)->get_frag_count() : -1);
       // if its an alternate delete
       if (!vec) {
         ink_assert(!total_len);
         if (alternate_index >= 0) {
+          MUTEX_TRY_LOCK(stripe_lock, vol->mutex, mutex->thread_holding);
+          if (!stripe_lock.is_locked())
+            VC_SCHED_LOCK_RETRY();
           write_vector->remove(alternate_index, true);
           alternate_index = CACHE_ALT_REMOVED;
           if (!write_vector->count())
@@ -98,16 +107,10 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
       write_vector->remove(0, true);
     }
     if (vec) {
-      /* preserve fragment offset data from old info. This method is
-         called iff the update is a header only update so the fragment
-         data should remain valid.
-      */
-      if (alternate_index >= 0)
-        alternate.copy_frag_offsets_from(write_vector->get(alternate_index));
       alternate_index = write_vector->insert(&alternate, alternate_index);
     }
 
-    if (od->move_resident_alt && first_buf._ptr() && !od->has_multiple_writers()) {
+    if (od->move_resident_alt && first_buf._ptr() /* && !od->has_multiple_writers() */) {
       Doc *doc = (Doc *)first_buf->data();
       int small_doc = (int64_t)doc->data_len() < (int64_t)cache_config_alt_rewrite_max_size;
       int have_res_alt = doc->key == od->single_doc_key;
@@ -134,7 +137,7 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
     od->writing_vec = 1;
     f.use_first_key = 1;
     SET_HANDLER(&CacheVC::openWriteCloseHeadDone);
-    ret = do_write_call();
+    ret = do_write_lock_call();
   }
   if (ret == EVENT_RETURN)
     return handleEvent(AIO_EVENT_DONE, 0);
@@ -161,7 +164,7 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
      (f.use_fist_key || f.evac_vector) is set. Write_vector is written to disk
    - alternate_index. Used only if write_vector needs to be written to disk.
      Used to find out the VC's alternate in the write_vector and set its
-     length to tatal_len.
+     length to total_len.
    - write_len. The number of bytes for this fragment.
    - total_len. The total number of bytes for the document so far.
      Doc->total_len and alternate's total len is set to this value.
@@ -316,8 +319,8 @@ Vol::aggWriteDone(int event, Event *e)
     header->last_write_pos = header->write_pos;
     header->write_pos += io.aiocb.aio_nbytes;
     ink_assert(header->write_pos >= start);
-    DDebug("cache_agg", "Dir %s, Write: %" PRIu64 ", last Write: %" PRIu64 "\n", hash_text.get(), header->write_pos,
-           header->last_write_pos);
+    Debug("cache_agg", "Dir %s, Write: %" PRIu64 ", last Write: %" PRIu64 "\n", hash_text.get(), header->write_pos,
+          header->last_write_pos);
     ink_assert(header->write_pos == header->agg_pos);
     if (header->write_pos + EVACUATION_SIZE > scan_pos)
       periodic_scan();
@@ -722,7 +725,7 @@ agg_copy(char *p, CacheVC *vc)
     IOBufferBlock *res_alt_blk = 0;
 
     uint32_t len = vc->write_len + vc->header_len + vc->frag_len + sizeofDoc;
-    ink_assert(vc->frag_type != CACHE_FRAG_TYPE_HTTP || len != sizeofDoc);
+    ink_assert(vc->frag_type != CACHE_FRAG_TYPE_HTTP || len != sizeofDoc || 0 == vc->fragment);
     ink_assert(vol->round_to_approx_size(len) == vc->agg_len);
     // update copy of directory entry for this document
     dir_set_approx_size(&vc->dir, vc->agg_len);
@@ -781,16 +784,23 @@ agg_copy(char *p, CacheVC *vc)
 #ifdef HTTP_CACHE
       if (vc->frag_type == CACHE_FRAG_TYPE_HTTP) {
         ink_assert(vc->write_vector->count() > 0);
-        if (!vc->f.update && !vc->f.evac_vector) {
-          ink_assert(!(vc->first_key == zero_key));
-          CacheHTTPInfo *http_info = vc->write_vector->get(vc->alternate_index);
-          http_info->object_size_set(vc->total_len);
-        }
-        // update + data_written =>  Update case (b)
-        // need to change the old alternate's object length
-        if (vc->f.update && vc->total_len) {
-          CacheHTTPInfo *http_info = vc->write_vector->get(vc->alternate_index);
-          http_info->object_size_set(vc->total_len);
+        if (vc->resp_range.hasRanges()) {
+          int64_t size = vc->alternate.object_size_get();
+          if (size >= 0)
+            doc->total_len = size;
+        } else {
+          // As the header is finalized the fragment vector should be trimmed if the object is complete.
+          if (!vc->f.update && !vc->f.evac_vector) {
+            ink_assert(!(vc->first_key == zero_key));
+            CacheHTTPInfo *http_info = vc->write_vector->get(vc->alternate_index);
+            http_info->object_size_set(vc->total_len);
+          }
+          // update + data_written =>  Update case (b)
+          // need to change the old alternate's object length
+          if (vc->f.update && vc->total_len) {
+            CacheHTTPInfo *http_info = vc->write_vector->get(vc->alternate_index);
+            http_info->object_size_set(vc->total_len);
+          }
         }
         ink_assert(!(((uintptr_t)&doc->hdr()[0]) & HDR_PTR_ALIGNMENT_MASK));
         ink_assert(vc->header_len == vc->write_vector->marshal(doc->hdr(), vc->header_len));
@@ -1058,6 +1068,35 @@ Lwait:
 }
 
 int
+CacheVC::openWriteEmptyEarliestDone(int event, Event *e)
+{
+  cancel_trigger();
+  if (event == AIO_EVENT_DONE)
+    set_io_not_in_progress();
+  else if (is_io_in_progress())
+    return EVENT_CONT;
+
+  {
+    SCOPED_MUTEX_LOCK(lock, od->mutex, this_ethread());
+    alternate_index = get_alternate_index(write_vector, this->earliest_key);
+    od->write_complete(key, this, io.ok()); // in any case, the IO is over.
+    key = od->key_for(earliest_key, write_pos);
+  }
+
+  SET_HANDLER(&CacheVC::openWriteMain);
+
+  // on error terminate if we're already closed, otherwise notify external continuation.
+  if (!io.ok()) {
+    if (closed) {
+      closed = -1;
+      return die();
+    }
+    return calluser(VC_EVENT_ERROR);
+  }
+  return this->openWriteMain(event, e); // go back to writing our actual data.
+}
+
+int
 CacheVC::openWriteCloseDir(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
   cancel_trigger();
@@ -1068,6 +1107,7 @@ CacheVC::openWriteCloseDir(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED *
       ink_assert(!is_io_in_progress());
       VC_SCHED_LOCK_RETRY();
     }
+    od->close_writer(earliest_key, this);
     vol->close_write(this);
     if (closed < 0 && fragment)
       dir_delete(&earliest_key, vol, &earliest_dir);
@@ -1169,13 +1209,17 @@ CacheVC::openWriteCloseHead(int event, Event *e)
   cancel_trigger();
   f.use_first_key = 1;
   if (io.ok())
-    ink_assert(fragment || (length == (int64_t)total_len));
+    ink_assert(fragment || (length == (int64_t)total_len) ||
+               (resp_range.hasRanges() && alternate.object_size_get() > alternate.get_frag_fixed_size()));
   else
     return openWriteCloseDir(event, e);
-  if (f.data_done)
+  if (f.data_done) {
     write_len = 0;
-  else
+  } else {
     write_len = length;
+    // If we're writing data in the first / header doc, then it's a resident alt.
+    alternate.m_alt->m_flag.complete_p = true;
+  }
 #ifdef HTTP_CACHE
   if (frag_type == CACHE_FRAG_TYPE_HTTP) {
     SET_HANDLER(&CacheVC::updateVector);
@@ -1206,35 +1250,32 @@ CacheVC::openWriteCloseDataDone(int event, Event *e)
     CACHE_TRY_LOCK(lock, vol->mutex, this_ethread());
     if (!lock.is_locked())
       VC_LOCK_RETRY_EVENT();
-    if (!fragment) {
-      ink_assert(key == earliest_key);
-      earliest_dir = dir;
-#ifdef HTTP_CACHE
-    } else {
-      // Store the offset only if there is a table.
-      // Currently there is no alt (and thence no table) for non-HTTP.
-      if (alternate.valid())
-        alternate.push_frag_offset(write_pos);
-#endif
-    }
-    fragment++;
-    write_pos += write_len;
     dir_insert(&key, vol, &dir);
-    blocks = iobufferblock_skip(blocks, &offset, &length, write_len);
-    next_CacheKey(&key, &key);
-    if (length) {
-      write_len = length;
-      if (write_len > MAX_FRAG_SIZE)
-        write_len = MAX_FRAG_SIZE;
-      if ((ret = do_write_call()) == EVENT_RETURN)
-        goto Lcallreturn;
-      return ret;
-    }
-    f.data_done = 1;
-    return openWriteCloseHead(event, e); // must be called under vol lock from here
   }
-Lcallreturn:
-  return handleEvent(AIO_EVENT_DONE, 0);
+
+  if (key == earliest_key)
+    earliest_dir = dir;
+
+  {
+    SCOPED_MUTEX_LOCK(lock, od->mutex, mutex->thread_holding);
+    write_vector->write_complete(earliest_key, this, true);
+  }
+
+  write_pos += write_len;
+  blocks = iobufferblock_skip(blocks, &offset, &length, write_len);
+  next_CacheKey(&key, &key);
+  if (length) {
+    write_len = length;
+    if (write_len > MAX_FRAG_SIZE)
+      write_len = MAX_FRAG_SIZE;
+    if ((ret = do_write_call()) == EVENT_RETURN)
+      return handleEvent(AIO_EVENT_DONE, 0);
+    return ret;
+  }
+
+  f.data_done = 1;
+  return openWriteCloseHead(event, e); // must be called under vol lock from here
+                                       // [amc] don't see why, guess we'll find out.
 }
 
 int
@@ -1267,8 +1308,9 @@ CacheVC::openWriteClose(int event, Event *e)
       return openWriteCloseDir(event, e);
 #endif
     }
-    if (length && (fragment || length > MAX_FRAG_SIZE)) {
+    if (length && (fragment || length > MAX_FRAG_SIZE || alternate.object_size_get() > alternate.get_frag_fixed_size())) {
       SET_HANDLER(&CacheVC::openWriteCloseDataDone);
+      this->updateWriteStateFromRange();
       write_len = length;
       if (write_len > MAX_FRAG_SIZE)
         write_len = MAX_FRAG_SIZE;
@@ -1296,48 +1338,114 @@ CacheVC::openWriteWriteDone(int event, Event *e)
     SET_HANDLER(&CacheVC::openWriteMain);
     return calluser(VC_EVENT_ERROR);
   }
+
   {
     CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding);
     if (!lock.is_locked())
       VC_LOCK_RETRY_EVENT();
-    // store the earliest directory. Need to remove the earliest dir
-    // in case the writer aborts.
-    if (!fragment) {
-      ink_assert(key == earliest_key);
-      earliest_dir = dir;
-#ifdef HTTP_CACHE
-    } else {
-      // Store the offset only if there is a table.
-      // Currently there is no alt (and thence no table) for non-HTTP.
-      if (alternate.valid())
-        alternate.push_frag_offset(write_pos);
-#endif
-    }
-    ++fragment;
-    write_pos += write_len;
     dir_insert(&key, vol, &dir);
-    DDebug("cache_insert", "WriteDone: %X, %X, %d", key.slice32(0), first_key.slice32(0), write_len);
-    blocks = iobufferblock_skip(blocks, &offset, &length, write_len);
-    next_CacheKey(&key, &key);
   }
+
+  if (key == earliest_key)
+    earliest_dir = dir;
+
+  {
+    SCOPED_MUTEX_LOCK(lock, od->mutex, mutex->thread_holding);
+    write_vector->write_complete(earliest_key, this, true);
+  }
+
+  DDebug("cache_insert", "WriteDone: %X, %X, %d", key.slice32(0), first_key.slice32(0), write_len);
+
+  resp_range.consume(write_len);
+  blocks = iobufferblock_skip(blocks, &offset, &length, write_len);
+
   if (closed)
     return die();
   SET_HANDLER(&CacheVC::openWriteMain);
   return openWriteMain(event, e);
 }
 
-static inline int
-target_fragment_size()
+int64_t
+CacheProcessor::get_fixed_fragment_size() const
 {
   return cache_config_target_fragment_size - sizeofDoc;
 }
 
+
+Action *
+CacheVC::do_write_init()
+{
+  Debug("amc", "[do_write_init] vc=%p", this);
+  SET_CONTINUATION_HANDLER(this, &CacheVC::openWriteInit);
+  return EVENT_DONE == this->openWriteInit(EVENT_IMMEDIATE, 0) ? ACTION_RESULT_DONE : &_action;
+}
+
+/* Do some initial setup and then switch over to openWriteMain
+ */
+int
+CacheVC::openWriteInit(int eid, Event *event)
+{
+  Debug("amc", "[openWriteInit] vc=%p", this);
+  {
+    CACHE_TRY_LOCK(lock, od->mutex, mutex->thread_holding);
+    if (!lock.is_locked()) {
+      trigger = mutex->thread_holding->schedule_in_local(this, HRTIME_MSECONDS(cache_config_mutex_retry_delay), eid);
+      return EVENT_CONT;
+    }
+
+    if (alternate.valid() && earliest_key != alternate.object_key_get()) {
+      // When the VC is created it sets up for a new alternate write. If we're back filling we
+      // need to tweak that back to the existing alternate.
+      Debug("amc", "[CacheVC::openWriteInit] updating earliest key from alternate");
+      alternate.object_key_get(&earliest_key);
+    }
+    // Get synchronized with the OD vector.
+    if (-1 == (alternate_index = get_alternate_index(write_vector, earliest_key))) {
+      Debug("amc", "[openWriteInit] alt not found, inserted");
+      alternate_index = write_vector->insert(&alternate); // not there, add it
+    } else {
+      HTTPInfo *base = write_vector->get(alternate_index);
+      if (!base->is_writeable()) {
+        // The alternate instance is mapped directly on a read buffer, which we can't modify.
+        // It must be replaced with a live, mutable one.
+        Debug("amc", "Updating OD vector element %d : 0x%p with mutable version %p", alternate_index, base, alternate.m_alt);
+        alternate.copy(base);           // make a local copy
+        base->copy_shallow(&alternate); // paste the mutable copy back.
+      }
+    }
+    // mark us as a writer.
+    write_vector->data[alternate_index]._writers.push(this);
+    alternate.copy_shallow(write_vector->get(alternate_index));
+
+    if (this == od->open_writer) {
+      od->open_writer = NULL;
+      CacheVC *reader;
+      while (NULL != (reader = od->open_waiting.pop())) {
+        Debug("amc", "[CacheVC::openWriteInit] wake up %p", reader);
+        reader->wake_up_thread->schedule_imm(reader);
+      }
+    }
+  }
+
+  if (resp_range.hasRanges()) {
+    resp_range.start();
+    //    this->updateWriteStateFromRange();
+  }
+
+  //  key = alternate.get_frag_key_of(write_pos);
+  SET_HANDLER(&CacheVC::openWriteMain);
+  return openWriteMain(eid, event);
+  //  return callcont(CACHE_EVENT_OPEN_WRITE);
+  //  return EVENT_DONE;
+}
+
 int
 CacheVC::openWriteMain(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
   cancel_trigger();
   int called_user = 0;
   ink_assert(!is_io_in_progress());
+  Debug("amc", "[CacheVC::openWriteMain]");
 Lagain:
   if (!vio.buffer.writer()) {
     if (calluser(VC_EVENT_WRITE_READY) == EVENT_DONE)
@@ -1353,10 +1461,16 @@ Lagain:
     if (vio.ntodo() <= 0)
       return EVENT_CONT;
   }
+
   int64_t ntodo = (int64_t)(vio.ntodo() + length);
   int64_t total_avail = vio.buffer.reader()->read_avail();
   int64_t avail = total_avail;
   int64_t towrite = avail + length;
+  int64_t ffs = cacheProcessor.get_fixed_fragment_size();
+
+  Debug("amc", "[CacheVC::openWriteMain] ntodo=%" PRId64 " avail=%" PRId64 " towrite=%" PRId64 " frag=%d", ntodo, avail, towrite,
+        fragment);
+
   if (towrite > ntodo) {
     avail -= (towrite - ntodo);
     towrite = ntodo;
@@ -1369,17 +1483,19 @@ Lagain:
     blocks = vio.buffer.reader()->block;
     offset = vio.buffer.reader()->start_offset;
   }
+
   if (avail > 0) {
     vio.buffer.reader()->consume(avail);
     vio.ndone += avail;
     total_len += avail;
   }
   length = (uint64_t)towrite;
-  if (length > target_fragment_size() && (length < target_fragment_size() + target_fragment_size() / 4))
-    write_len = target_fragment_size();
+  // [amc] Need to change this to be exactly the fixed fragment size for this alternate.
+  if (length > ffs && (length < ffs + ffs / 4))
+    write_len = ffs;
   else
     write_len = length;
-  bool not_writing = towrite != ntodo && towrite < target_fragment_size();
+  bool not_writing = towrite != ntodo && towrite < ffs;
   if (!called_user) {
     if (not_writing) {
       called_user = 1;
@@ -1391,12 +1507,61 @@ Lagain:
   }
   if (not_writing)
     return EVENT_CONT;
+
+  this->updateWriteStateFromRange();
+
+  {
+    CacheHTTPInfo *alt = &alternate;
+    SCOPED_MUTEX_LOCK(lock, od->mutex, this_ethread());
+
+#if 0
+    alternate_index = get_alternate_index(write_vector, earliest_key);
+    if (alternate_index < 0)
+      alternate_index = write_vector->insert(&alternate, alternate_index);
+
+    alt = write_vector->get(alternate_index);
+#endif
+
+    if (fragment != 0 && !alt->m_alt->m_earliest.m_flag.cached_p) {
+      SET_HANDLER(&CacheVC::openWriteEmptyEarliestDone);
+      if (!od->is_write_active(earliest_key, 0)) {
+        write_len = 0;
+        key = earliest_key;
+        Debug("amc", "[CacheVC::openWriteMain] writing empty earliest");
+      } else {
+        // go on the wait list
+        od->wait_for(earliest_key, this, 0);
+        not_writing = true;
+      }
+    } else if (od->is_write_active(earliest_key, write_pos)) {
+      od->wait_for(earliest_key, this, write_pos);
+      not_writing = true;
+    } else if (alternate.is_frag_cached(fragment)) {
+      not_writing = true;
+      Debug("amc", "Fragment %d already cached", fragment);
+      // Consume the data, as we won't be using it.
+      resp_range.consume(write_len);
+      blocks = iobufferblock_skip(blocks, &offset, &length, write_len);
+      // need to kick start things again or we'll stall.
+      return this->handleEvent(EVENT_IMMEDIATE);
+    } else {
+      od->write_active(earliest_key, this, write_pos);
+    }
+  }
+
+  if (0 == write_len) // need to set up the write not under OpenDir lock.
+    return do_write_lock_call();
+
   if (towrite == ntodo && f.close_complete) {
     closed = 1;
     SET_HANDLER(&CacheVC::openWriteClose);
     return openWriteClose(EVENT_NONE, NULL);
+  } else if (not_writing) {
+    return EVENT_CONT;
   }
+
   SET_HANDLER(&CacheVC::openWriteWriteDone);
+  Debug("amc", "[CacheVC::openWriteMain] doing write call");
   return do_write_lock_call();
 }
 
@@ -1434,7 +1599,7 @@ Lcollision : {
   }
 }
 Ldone:
-  SET_HANDLER(&CacheVC::openWriteMain);
+  SET_HANDLER(&CacheVC::openWriteInit);
   return callcont(CACHE_EVENT_OPEN_WRITE);
 Lcallreturn:
   return handleEvent(AIO_EVENT_DONE, 0); // hopefully a tail call
@@ -1458,7 +1623,7 @@ CacheVC::openWriteStartDone(int event, Event *e)
     if (!lock.is_locked())
       VC_LOCK_RETRY_EVENT();
 
-    if (_action.cancelled && (!od || !od->has_multiple_writers()))
+    if (_action.cancelled && (!od /* || !od->has_multiple_writers() */))
       goto Lcancel;
 
     if (event == AIO_EVENT_DONE) { // vector read done
@@ -1501,15 +1666,17 @@ CacheVC::openWriteStartDone(int event, Event *e)
     }
 
   Lcollision:
-    int if_writers = ((uintptr_t)info == CACHE_ALLOW_MULTIPLE_WRITES);
+    //    int if_writers = ((uintptr_t)info == CACHE_ALLOW_MULTIPLE_WRITES);
     if (!od) {
-      if ((err = vol->open_write(this, if_writers, cache_config_http_max_alts > 1 ? cache_config_http_max_alts : 0)) > 0)
+      if ((err = vol->open_write(this)) > 0)
         goto Lfailure;
-      if (od->has_multiple_writers()) {
-        MUTEX_RELEASE(lock);
-        SET_HANDLER(&CacheVC::openWriteMain);
-        return callcont(CACHE_EVENT_OPEN_WRITE);
-      }
+      /*
+            if (od->has_multiple_writers()) {
+              MUTEX_RELEASE(lock);
+              SET_HANDLER(&CacheVC::openWriteInit);
+              return this->openWriteInit(EVENT_IMMEDIATE, 0);
+            }
+      */
     }
     // check for collision
     if (dir_probe(&first_key, vol, &dir, &last_collision)) {
@@ -1528,8 +1695,9 @@ Lsuccess:
   od->reading_vec = 0;
   if (_action.cancelled)
     goto Lcancel;
-  SET_HANDLER(&CacheVC::openWriteMain);
+  SET_HANDLER(&CacheVC::openWriteInit);
   return callcont(CACHE_EVENT_OPEN_WRITE);
+//  return this->openWriteInit(EVENT_IMMEDIATE, 0);
 
 Lfailure:
   CACHE_INCREMENT_DYN_STAT(base_stat + CACHE_STAT_FAILURE);
@@ -1553,7 +1721,7 @@ CacheVC::openWriteStartBegin(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED
   cancel_trigger();
   if (_action.cancelled)
     return free_CacheVC(this);
-  if (((err = vol->open_write_lock(this, false, 1)) > 0)) {
+  if (((err = vol->open_write_lock(this)) > 0)) {
     CACHE_INCREMENT_DYN_STAT(base_stat + CACHE_STAT_FAILURE);
     free_CacheVC(this);
     _action.continuation->handleEvent(CACHE_EVENT_OPEN_WRITE_FAILED, (void *)-err);
@@ -1566,7 +1734,7 @@ CacheVC::openWriteStartBegin(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED
     return openWriteOverwrite(EVENT_IMMEDIATE, 0);
   } else {
     // write by key
-    SET_HANDLER(&CacheVC::openWriteMain);
+    SET_HANDLER(&CacheVC::openWriteInit);
     return callcont(CACHE_EVENT_OPEN_WRITE);
   }
 }
@@ -1613,7 +1781,7 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheFragType frag_ty
   c->f.sync = (options & CACHE_WRITE_OPT_SYNC) == CACHE_WRITE_OPT_SYNC;
   c->pin_in_cache = (uint32_t)apin_in_cache;
 
-  if ((res = c->vol->open_write_lock(c, false, 1)) > 0) {
+  if ((res = c->vol->open_write_lock(c)) > 0) {
     // document currently being written, abort
     CACHE_INCREMENT_DYN_STAT(c->base_stat + CACHE_STAT_FAILURE);
     cont->handleEvent(CACHE_EVENT_OPEN_WRITE_FAILED, (void *)-res);
@@ -1626,9 +1794,10 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheFragType frag_ty
     return &c->_action;
   }
   if (!c->f.overwrite) {
-    SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteMain);
+    SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteInit);
     c->callcont(CACHE_EVENT_OPEN_WRITE);
     return ACTION_RESULT_DONE;
+    //    return c->do_write_init();
   } else {
     SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteOverwrite);
     if (c->openWriteOverwrite(EVENT_IMMEDIATE, 0) == EVENT_DONE)
@@ -1638,6 +1807,22 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheFragType frag_ty
   }
 }
 
+int
+CacheVC::updateWriteStateFromRange()
+{
+  if (resp_range.hasPendingRangeShift())
+    resp_range.consumeRangeShift();
+  write_pos = resp_range.getOffset();
+  fragment = alternate.get_frag_index_of(write_pos);
+  key = alternate.get_frag_key(fragment);
+  {
+    char tmp[64];
+    Debug("amc", "[writeMain] pos=%" PRId64 " frag=%d/%" PRId64 " key=%s", write_pos, fragment, alternate.get_frag_offset(fragment),
+          key.toHexStr(tmp));
+  }
+  return write_pos;
+}
+
 #ifdef HTTP_CACHE
 // main entry point for writing of http documents
 Action *
@@ -1651,22 +1836,28 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheHTTPInfo *info,
 
   ink_assert(caches[type] == this);
   intptr_t err = 0;
-  int if_writers = (uintptr_t)info == CACHE_ALLOW_MULTIPLE_WRITES;
+  //  int if_writers = (uintptr_t)info == CACHE_ALLOW_MULTIPLE_WRITES;
   CacheVC *c = new_CacheVC(cont);
   ProxyMutex *mutex = cont->mutex;
   c->vio.op = VIO::WRITE;
   c->first_key = *key;
-  /*
-     The transition from single fragment document to a multi-fragment document
-     would cause a problem if the key and the first_key collide. In case of
-     a collision, old vector data could be served to HTTP. Need to avoid that.
-     Also, when evacuating a fragment, we have to decide if its the first_key
-     or the earliest_key based on the dir_tag.
-   */
-  do {
-    rand_CacheKey(&c->key, cont->mutex);
-  } while (DIR_MASK_TAG(c->key.slice32(2)) == DIR_MASK_TAG(c->first_key.slice32(2)));
-  c->earliest_key = c->key;
+  if (info) {
+    info->object_key_get(&c->key);
+    c->earliest_key = c->key;
+  } else {
+    /*
+      The transition from single fragment document to a multi-fragment document
+      would cause a problem if the key and the first_key collide. In case of
+      a collision, old vector data could be served to HTTP. Need to avoid that.
+      Also, when evacuating a fragment, we have to decide if it's the first_key
+      or the earliest_key based on the dir_tag.
+    */
+    do {
+      rand_CacheKey(&c->key, cont->mutex);
+    } while (DIR_MASK_TAG(c->key.slice32(2)) == DIR_MASK_TAG(c->first_key.slice32(2)));
+    c->earliest_key = c->key;
+  }
+
   c->frag_type = CACHE_FRAG_TYPE_HTTP;
   c->vol = key_to_vol(key, hostname, host_len);
   Vol *vol = c->vol;
@@ -1715,13 +1906,15 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheHTTPInfo *info,
   {
     CACHE_TRY_LOCK(lock, c->vol->mutex, cont->mutex->thread_holding);
     if (lock.is_locked()) {
-      if ((err = c->vol->open_write(c, if_writers, cache_config_http_max_alts > 1 ? cache_config_http_max_alts : 0)) > 0)
+      if ((err = c->vol->open_write(c)) > 0)
         goto Lfailure;
       // If there are multiple writers, then this one cannot be an update.
       // Only the first writer can do an update. If that's the case, we can
       // return success to the state machine now.;
-      if (c->od->has_multiple_writers())
-        goto Lmiss;
+      /*
+            if (c->od->has_multiple_writers())
+              goto Lmiss;
+      */
       if (!dir_probe(key, c->vol, &c->dir, &c->last_collision)) {
         if (c->f.update) {
           // fail update because vector has been GC'd
@@ -1730,9 +1923,12 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheHTTPInfo *info,
           goto Lfailure;
         }
         // document doesn't exist, begin write
+        ink_assert(NULL == c->od->open_writer);
+        c->od->open_writer = c;
         goto Lmiss;
       } else {
         c->od->reading_vec = 1;
+        c->od->open_writer = c;
         // document exists, read vector
         SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteStartDone);
         switch (c->do_read_call(&c->first_key)) {
@@ -1752,7 +1948,8 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheHTTPInfo *info,
   }
 
 Lmiss:
-  SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteMain);
+  //  return c->do_write_init();
+  SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteInit);
   c->callcont(CACHE_EVENT_OPEN_WRITE);
   return ACTION_RESULT_DONE;
 

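The hint parameter added to get_alternate_index above is a small "check the cached index first, then scan" optimization: the caller's previous answer is usually still valid, so the linear scan of the alternate vector is skipped in the common case. Below is a minimal sketch of that pattern, using a hypothetical std::vector<std::string> in place of CacheHTTPInfoVector and plain string comparison in place of compare_object_key.

    // Sketch of hint-first lookup; simplified types, not the cache's real API.
    #include <cstdio>
    #include <string>
    #include <vector>

    static int
    find_with_hint(const std::vector<std::string> &vec, const std::string &key, int hint = -1)
    {
      int count = static_cast<int>(vec.size());
      if (count == 0)
        return -1;
      // Fast path: the index remembered from a previous lookup is usually still right.
      if (0 <= hint && hint < count && vec[hint] == key)
        return hint;
      // Slow path: scan the rest, skipping the slot already checked.
      for (int i = 0; i < count; ++i) {
        if (i == hint)
          continue;
        if (vec[i] == key)
          return i;
      }
      return -1;
    }

    int
    main()
    {
      std::vector<std::string> alts = {"alt-a", "alt-b", "alt-c"};
      std::printf("%d\n", find_with_hint(alts, "alt-b", 1)); // hint hits: 1
      std::printf("%d\n", find_with_hint(alts, "alt-c", 0)); // hint misses, scan finds: 2
      std::printf("%d\n", find_with_hint(alts, "alt-x"));    // not present: -1
      return 0;
    }

In the patch the hint is the CacheVC's alternate_index from the prior lookup, as in updateVector above.
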
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/I_Cache.h
----------------------------------------------------------------------
diff --git a/iocore/cache/I_Cache.h b/iocore/cache/I_Cache.h
index 77aafe9..e42ddf5 100644
--- a/iocore/cache/I_Cache.h
+++ b/iocore/cache/I_Cache.h
@@ -49,6 +49,7 @@
 #define CACHE_COMPRESSION_LIBZ 2
 #define CACHE_COMPRESSION_LIBLZMA 3
 
+struct CacheVConnection;
 struct CacheVC;
 struct CacheDisk;
 #ifdef HTTP_CACHE
@@ -56,6 +57,7 @@ class CacheLookupHttpConfig;
 class URL;
 class HTTPHdr;
 class HTTPInfo;
+class HTTPRangeSpec;
 
 typedef HTTPHdr CacheHTTPHdr;
 typedef URL CacheURL;
@@ -80,6 +82,17 @@ struct CacheProcessor : public Processor {
                             CacheFragType frag_type = CACHE_FRAG_TYPE_NONE, const char *hostname = 0, int host_len = 0);
   inkcoreapi Action *open_read(Continuation *cont, const CacheKey *key, bool cluster_cache_local,
                                CacheFragType frag_type = CACHE_FRAG_TYPE_NONE, const char *hostname = 0, int host_len = 0);
+
+  /** Open a cache reader from an already open writer.
+
+      This is used for partial content on a cache miss to open a reader corresponding to the
+      partial content writer.
+  */
+  inkcoreapi Action *open_read(Continuation *cont, CacheVConnection *writer, HTTPHdr *client_request_hdr);
+
+  Action *open_read_buffer(Continuation *cont, MIOBuffer *buf, CacheKey *key, CacheFragType frag_type = CACHE_FRAG_TYPE_NONE,
+                           char *hostname = 0, int host_len = 0);
+
   inkcoreapi Action *open_write(Continuation *cont, CacheKey *key, bool cluster_cache_local,
                                 CacheFragType frag_type = CACHE_FRAG_TYPE_NONE, int expected_size = CACHE_EXPECTED_SIZE,
                                 int options = 0, time_t pin_in_cache = (time_t)0, char *hostname = 0, int host_len = 0);
@@ -124,6 +137,9 @@ struct CacheProcessor : public Processor {
   */
   bool has_online_storage() const;
 
+  /** Get the target fragment size. */
+  int64_t get_fixed_fragment_size() const;
+
   static int IsCacheEnabled();
 
   static bool IsCacheReady(CacheFragType type);
@@ -189,6 +205,62 @@ struct CacheVConnection : public VConnection {
 #ifdef HTTP_CACHE
   virtual void set_http_info(CacheHTTPInfo *info) = 0;
   virtual void get_http_info(CacheHTTPInfo **info) = 0;
+
+  /** Get the boundary string for a multi-part range response.
+      The length of the string is returned in @a len.
+
+      @return A point to the string.
+   */
+  virtual char const *get_http_range_boundary_string(int *len) const = 0;
+
+  /** Get the effective content size.
+
+      This is the amount of actual data based on any range or framing.  Effectively this is the
+      value to be passed to the @c VIO while the content length is used in the HTTP header.
+  */
+  virtual int64_t get_effective_content_size() = 0;
+
+  /** Set the origin reported content size.
+
+      This is the content length reported by the origin server and should be considered a hint, not
+      definitive. The object size, as stored in the cache, is the actual amount of data received and
+      cached.
+
+      @note This is the total content length as reported in the HTTP header, not the partial (range based) response size.
+      Also this is the length of the HTTP content, which may differ from the size of the data stream.
+  */
+  virtual void set_full_content_length(int64_t) = 0;
+
+  /** Set the output ranges for the content.
+   */
+  virtual void set_content_range(HTTPRangeSpec const &range) = 0;
+
+  /// Get the unchanged ranges for the request range @a req.
+  /// If @a req is empty it is treated as a full request (non-partial).
+  /// @return @c true if the @a result is not empty.
+  /// @internal Currently this just returns the single range that is the convex hull of the uncached request.
+  /// Someday we may want to do the exact range spec but we use the type for now because it's easier.
+  virtual bool
+  get_uncached(HTTPRangeSpec const &req, HTTPRangeSpec &result, int64_t initial)
+  {
+    (void)req;
+    (void)result;
+    (void)initial;
+    return false;
+  }
+
+  /** Set the range for the input (response content).
+      The incoming bytes will be written to this section of the object.
+      @note This range @b must be absolute.
+      @note The range is inclusive.
+      @return The # of bytes in the range.
+  */
+  virtual int64_t
+  set_inbound_range(int64_t min, int64_t max)
+  {
+    return 1 + (max - min);
+  }
+
 #endif
 
   virtual bool is_ram_cache_hit() const = 0;
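
For illustration only (not part of the patch): a minimal sketch of how the new partial-content methods above are meant to be driven, modeled on HandleCacheOpenReadHit() and HttpSM::setup_cache_write_transfer() later in this series. The helper function and its arguments are hypothetical, and the direct use of HTTPRangeSpec::_min / _max is an assumption based on how those fields are used elsewhere in this patch.

// Hypothetical sketch, not ATS code.
void
example_partial_content(CacheVConnection *read_vc, CacheVConnection *write_vc,
                        HTTPRangeSpec const &client_range, int64_t origin_content_length)
{
  HTTPRangeSpec uncached;

  // Ask the cached object which part of the client's range is missing; the last argument
  // is the minimum initial uncached segment worth fetching (cf. MIN_INITIAL_UNCACHED in
  // HttpTransact.cc below).
  if (read_vc->get_uncached(client_range, uncached, 4 * (1 << 20))) {
    // Record the total object length reported by the origin (a hint, per the note above).
    write_vc->set_full_content_length(origin_content_length);
    // The origin response body will cover this absolute, inclusive range; e.g. for
    // min=500, max=999 set_inbound_range() returns 1 + (999 - 500) = 500 bytes.
    write_vc->set_inbound_range(uncached._min, uncached._max);
  }
}
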

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/I_CacheDefs.h
----------------------------------------------------------------------
diff --git a/iocore/cache/I_CacheDefs.h b/iocore/cache/I_CacheDefs.h
index 941ff0e..02ce264 100644
--- a/iocore/cache/I_CacheDefs.h
+++ b/iocore/cache/I_CacheDefs.h
@@ -21,6 +21,7 @@
   limitations under the License.
  */
 
+#include <vector>
 
 #ifndef _I_CACHE_DEFS_H__
 #define _I_CACHE_DEFS_H__
@@ -32,7 +33,7 @@
 #define CACHE_ALT_INDEX_DEFAULT -1
 #define CACHE_ALT_REMOVED -2
 
-#define CACHE_DB_MAJOR_VERSION 24
+#define CACHE_DB_MAJOR_VERSION 25
 #define CACHE_DB_MINOR_VERSION 0
 
 #define CACHE_DIR_MAJOR_VERSION 18
@@ -144,4 +145,5 @@ struct HttpCacheKey {
    word(2) - tag (lower bits), hosttable hash (upper bits)
    word(3) - ram cache hash, lookaside cache
  */
+
 #endif // __CACHE_DEFS_H__

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/P_CacheBC.h
----------------------------------------------------------------------
diff --git a/iocore/cache/P_CacheBC.h b/iocore/cache/P_CacheBC.h
index 2164692..2ffd4e8 100644
--- a/iocore/cache/P_CacheBC.h
+++ b/iocore/cache/P_CacheBC.h
@@ -33,9 +33,9 @@ namespace cache_bc
 */
 
 typedef HTTPHdr HTTPHdr_v21;
+typedef HTTPHdr HTTPHdr_v23;
 typedef HdrHeap HdrHeap_v23;
 typedef CryptoHash CryptoHash_v23;
-typedef HTTPCacheAlt HTTPCacheAlt_v23;
 
 /** Cache backwards compatibility structure - the fragment table.
     This is copied from @c HTTPCacheAlt in @c HTTP.h.
@@ -120,6 +120,34 @@ struct Doc_v23 {
   size_t data_len();
 };
 
+struct HTTPCacheAlt_v23 {
+  uint32_t m_magic;
+  int32_t m_writeable;
+  int32_t m_unmarshal_len;
+
+  int32_t m_id;
+  int32_t m_rid;
+
+  int32_t m_object_key[4];
+  int32_t m_object_size[2];
+
+  HTTPHdr_v23 m_request_hdr;
+  HTTPHdr_v23 m_response_hdr;
+
+  time_t m_request_sent_time;
+  time_t m_response_received_time;
+
+  int m_frag_offset_count;
+  typedef uint64_t FragOffset;
+  FragOffset *m_frag_offsets;
+  static int const N_INTEGRAL_FRAG_OFFSETS = 4;
+  FragOffset m_integral_frag_offsets[N_INTEGRAL_FRAG_OFFSETS];
+
+  RefCountObj *m_ext_buffer;
+};
+
+typedef HTTPCacheAlt_v23 HTTPCacheAlt_v24; // no changes between these versions.
+
 static size_t const sizeofDoc_v23 = sizeof(Doc_v23);
 char *
 Doc_v23::data()

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/P_CacheDir.h
----------------------------------------------------------------------
diff --git a/iocore/cache/P_CacheDir.h b/iocore/cache/P_CacheDir.h
index c128537..36cf51d 100644
--- a/iocore/cache/P_CacheDir.h
+++ b/iocore/cache/P_CacheDir.h
@@ -213,48 +213,79 @@ struct FreeDir {
 #define dir_prev(_e) (_e)->w[2]
 #define dir_set_prev(_e, _o) (_e)->w[2] = (uint16_t)(_o)
 
-// INKqa11166 - Cache can not store 2 HTTP alternates simultaneously.
-// To allow this, move the vector from the CacheVC to the OpenDirEntry.
-// Each CacheVC now maintains a pointer to this vector. Adding/Deleting
-// alternates from this vector is done under the Vol::lock. The alternate
-// is deleted/inserted into the vector just before writing the vector disk
-// (CacheVC::updateVector).
-LINK_FORWARD_DECLARATION(CacheVC, opendir_link) // forward declaration
 struct OpenDirEntry {
-  DLL<CacheVC, Link_CacheVC_opendir_link> writers; // list of all the current writers
-  DLL<CacheVC, Link_CacheVC_opendir_link> readers; // list of all the current readers - not used
-  CacheHTTPInfoVector vector;                      // Vector for the http document. Each writer
-                                                   // maintains a pointer to this vector and
-                                                   // writes it down to disk.
-  CacheKey single_doc_key;                         // Key for the resident alternate.
-  Dir single_doc_dir;                              // Directory for the resident alternate
-  Dir first_dir;                                   // Dir for the vector. If empty, a new dir is
-                                                   // inserted, otherwise this dir is overwritten
-  uint16_t num_writers;                            // num of current writers
-  uint16_t max_writers;                            // max number of simultaneous writers allowed
-  bool dont_update_directory;                      // if set, the first_dir is not updated.
-  bool move_resident_alt;                          // if set, single_doc_dir is inserted.
-  volatile bool reading_vec;                       // somebody is currently reading the vector
-  volatile bool writing_vec;                       // somebody is currently writing the vector
+  typedef OpenDirEntry self; ///< Self reference type.
+
+  Ptr<ProxyMutex> mutex;
+
+  /// Vector for the http document. Each writer maintains a pointer to this vector and writes it down to disk.
+  CacheHTTPInfoVector vector;
+  CacheKey first_key;         ///< Key for first doc for this object.
+  CacheKey single_doc_key;    // Key for the resident alternate.
+  Dir single_doc_dir;         // Directory for the resident alternate
+  Dir first_dir;              // Dir for the vector. If empty, a new dir is
+                              // inserted, otherwise this dir is overwritten
+  uint16_t num_active;        // num of VCs working with this entry
+  uint16_t max_writers;       // max number of simultaneous writers allowed
+  bool dont_update_directory; // if set, the first_dir is not updated.
+  bool move_resident_alt;     // if set, single_doc_dir is inserted.
+  volatile bool reading_vec;  // somebody is currently reading the vector
+  volatile bool writing_vec;  // somebody is currently writing the vector
+
+  /** Set to a write @c CacheVC that has started but not yet updated the vector.
+
+      If this is set then there is a write @c CacheVC that is active but has not yet been able to
+      update the vector for its alternate. Any new reader should block on open if this is set and
+      enter itself on the @a open_waiting list, making this effectively a write lock on the object.
+      This is necessary because we can't reliably do alternate selection in this state. The waiting
+      read @c CacheVC instances are released as soon as the vector is updated; they do not have to
+      wait until the write @c CacheVC has finished its transaction. In practice this means until the
+      server response has been received and processed.
+  */
+  volatile CacheVC *open_writer;
+  /** A list of @c CacheVC instances that are waiting for the @a open_writer.
+   */
+  DLL<CacheVC, Link_CacheVC_Active_Link> open_waiting;
 
   LINK(OpenDirEntry, link);
 
-  int wait(CacheVC *c, int msec);
-
-  bool
-  has_multiple_writers()
-  {
-    return num_writers > 1;
-  }
+  //  int wait(CacheVC *c, int msec);
+
+  /// Get the alternate index for the @a key.
+  int index_of(CacheKey const &key);
+  /// Check if there are any writers for the alternate of @a alt_key.
+  bool has_writer(CacheKey const &alt_key);
+  /// Mark a @c CacheVC as actively writing at @a offset on the alternate with @a alt_key.
+  self &write_active(CacheKey const &alt_key, CacheVC *vc, int64_t offset);
+  /// Mark an active write by @a vc as complete and indicate whether it had @a success.
+  /// If the write is not @a success then the fragment is not marked as cached.
+  self &write_complete(CacheKey const &alt_key, CacheVC *vc, bool success = true);
+  /// Indicate if a VC is currently writing to the fragment with this @a offset.
+  bool is_write_active(CacheKey const &alt_key, int64_t offset);
+  /// Get the fragment key for a specific @a offset.
+  CacheKey const &key_for(CacheKey const &alt_key, int64_t offset);
+  /** Wait for a fragment to be written.
+
+      @return @c false if there is no writer that is scheduled to write that fragment.
+   */
+  bool wait_for(CacheKey const &alt_key, CacheVC *vc, int64_t offset);
+  /// Close out anything related to this writer
+  self &close_writer(CacheKey const &alt_key, CacheVC *vc);
 };
 
 struct OpenDir : public Continuation {
-  Queue<CacheVC, Link_CacheVC_opendir_link> delayed_readers;
+  typedef Queue<CacheVC, Link_CacheVC_OpenDir_Link> CacheVCQ;
+  CacheVCQ delayed_readers;
+
   DLL<OpenDirEntry> bucket[OPEN_DIR_BUCKETS];
 
-  int open_write(CacheVC *c, int allow_if_writers, int max_writers);
-  int close_write(CacheVC *c);
-  OpenDirEntry *open_read(const CryptoHash *key);
+  /** Open a live directory entry for @a vc.
+
+      @a force_p is set to @c true to force creation of the entry if it is not already there.
+  */
+  OpenDirEntry *open_entry(Vol *vol, CryptoHash const &key, bool force_p = false);
+  void close_entry(CacheVC *c);
+  //  OpenDirEntry *open_read(CryptoHash *key);
   int signal_readers(int event, Event *e);
 
   OpenDir();
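
For illustration only (not part of the patch): a sketch of the per-fragment write protocol implied by the method comments above. The helper functions are hypothetical, and locking via the new @a mutex member is omitted.

// Hypothetical sketch, not ATS code: expected call sequence for a writer on one fragment.
void
example_fragment_write(OpenDirEntry &od, CacheKey const &alt_key, CacheVC *vc, int64_t offset, bool io_ok)
{
  if (od.is_write_active(alt_key, offset))
    return; // another CacheVC is already writing this fragment

  od.write_active(alt_key, vc, offset); // claim the fragment at this offset
  // ... issue the disk write for the fragment at 'offset' here ...
  od.write_complete(alt_key, vc, io_ok); // on failure the fragment is not marked as cached
}

// A reader that needs a fragment that is still being written can park itself instead of
// going to the origin.
bool
example_wait_for_fragment(OpenDirEntry &od, CacheKey const &alt_key, CacheVC *reader, int64_t offset)
{
  return od.has_writer(alt_key) && od.wait_for(alt_key, reader, offset);
}
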


[2/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpCacheSM.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpCacheSM.h b/proxy/http/HttpCacheSM.h
index 3c14da3..0dbc2ae 100644
--- a/proxy/http/HttpCacheSM.h
+++ b/proxy/http/HttpCacheSM.h
@@ -70,8 +70,12 @@ public:
 
   Action *open_read(const HttpCacheKey *key, URL *url, HTTPHdr *hdr, CacheLookupHttpConfig *params, time_t pin_in_cache);
 
-  Action *open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache, bool retry,
-                     bool allow_multiple);
+  /** Open a cache read VC for the same object as the writer.
+      This is used on a partial-content cache miss to create a reader corresponding to the open writer.
+      @return An @c Action for the pending read open.
+  */
+  Action *open_partial_read(HTTPHdr *client_request_hdr);
+
+  Action *open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache, bool retry, bool allow_multiple);
 
   CacheVConnection *cache_read_vc;
   CacheVConnection *cache_write_vc;
@@ -149,6 +153,7 @@ private:
 
   int state_cache_open_read(int event, void *data);
   int state_cache_open_write(int event, void *data);
+  int state_cache_open_partial_read(int evid, void *data);
 
   HttpCacheAction captive_action;
   bool open_read_cb;
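
For illustration only (not part of the patch): the intended calling pattern for open_partial_read(), pieced together from HttpSM::handle_api_return() and state_cache_open_partial_read() later in this series; the wrapper function is hypothetical.

// Hypothetical wrapper, not ATS code.
Action *
example_open_partial_read(HttpCacheSM &cache_sm, HTTPHdr *client_request_hdr)
{
  // The write VC is already open and the origin response is being tunneled into the cache;
  // open a reader against that same writer so the client can be served from the cache.
  // The SM later receives CACHE_EVENT_OPEN_READ (or CACHE_EVENT_OPEN_READ_FAILED) in
  // state_cache_open_partial_read() with cache_sm.cache_read_vc attached.
  return cache_sm.open_partial_read(client_request_hdr);
}
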

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpDebugNames.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpDebugNames.cc b/proxy/http/HttpDebugNames.cc
index 7db357d..ea293df 100644
--- a/proxy/http/HttpDebugNames.cc
+++ b/proxy/http/HttpDebugNames.cc
@@ -386,6 +386,8 @@ HttpDebugNames::get_action_name(HttpTransact::StateMachineAction_t e)
     return ("SM_ACTION_API_POST_REMAP");
   case HttpTransact::SM_ACTION_POST_REMAP_SKIP:
     return ("SM_ACTION_POST_REMAP_SKIP");
+  case HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ:
+    return "SM_ACTION_CACHE_OPEN_PARTIAL_READ";
   }
 
   return ("unknown state name");

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpSM.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
index 0e37f22..6d270f9 100644
--- a/proxy/http/HttpSM.cc
+++ b/proxy/http/HttpSM.cc
@@ -253,10 +253,11 @@ HttpVCTable::cleanup_all()
     DebugSM("http", "[%" PRId64 "] [%s, %s]", sm_id, #state_name, HttpDebugNames::get_event_name(event)); \
   }
 
-#define HTTP_SM_SET_DEFAULT_HANDLER(_h) \
-  {                                     \
-    REMEMBER(-1, reentrancy_count);     \
-    default_handler = _h;               \
+#define HTTP_SM_SET_DEFAULT_HANDLER(_h)                                          \
+  {                                                                              \
+    REMEMBER(-1, reentrancy_count);                                              \
+    Debug("amc", "SM %" PRId64 " default handler = %s", sm_id, handlerName(_h)); \
+    default_handler = _h;                                                        \
   }
 
 
@@ -363,7 +364,6 @@ HttpSM::init()
   t_state.force_dns = (ip_rule_in_CacheControlTable() || t_state.parent_params->ParentTable->ipMatch ||
                        !(t_state.txn_conf->doc_in_cache_skip_dns) || !(t_state.txn_conf->cache_http));
 
-  http_parser.m_allow_non_http = t_state.http_config_param->parser_allow_non_http;
   http_parser_init(&http_parser);
 
   SET_HANDLER(&HttpSM::main_handler);
@@ -617,39 +617,26 @@ HttpSM::state_read_client_request_header(int event, void *data)
   // We need to handle EOS as well as READ_READY because the client
   // may have sent all of the data already followed by a fIN and that
   // should be OK.
-  if (is_transparent_passthrough_allowed() && ua_raw_buffer_reader != NULL) {
-    bool do_blind_tunnel = false;
-    // If we had a parse error and we're done reading data
-    // blind tunnel
-    if ((event == VC_EVENT_READ_READY || event == VC_EVENT_EOS) && state == PARSE_ERROR) {
-      do_blind_tunnel = true;
-
-      // If we had a GET request that has data after the
-      // get request, do blind tunnel
-    } else if (state == PARSE_DONE && t_state.hdr_info.client_request.method_get_wksidx() == HTTP_WKSIDX_GET &&
-               ua_raw_buffer_reader->read_avail() > 0 && !t_state.hdr_info.client_request.is_keep_alive_set()) {
-      do_blind_tunnel = true;
-    }
-    if (do_blind_tunnel) {
-      DebugSM("http", "[%" PRId64 "] first request on connection failed parsing, switching to passthrough.", sm_id);
-
-      t_state.transparent_passthrough = true;
-      http_parser_clear(&http_parser);
-
-      // Turn off read eventing until we get the
-      // blind tunnel infrastructure set up
-      ua_session->get_netvc()->do_io_read(this, 0, NULL);
-
-      /* establish blind tunnel */
-      setup_blind_tunnel_port();
+  if ((event == VC_EVENT_READ_READY || event == VC_EVENT_EOS) && state == PARSE_ERROR && is_transparent_passthrough_allowed() &&
+      ua_raw_buffer_reader != NULL) {
+    DebugSM("http", "[%" PRId64 "] first request on connection failed parsing, switching to passthrough.", sm_id);
 
-      // Setting half close means we will send the FIN when we've written all of the data.
-      if (event == VC_EVENT_EOS) {
-        this->set_ua_half_close_flag();
-        t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
-      }
-      return 0;
+    t_state.transparent_passthrough = true;
+    http_parser_clear(&http_parser);
+
+    // Turn off read eventing until we get the
+    // blind tunnel infrastructure set up
+    ua_session->get_netvc()->do_io_read(this, 0, NULL);
+
+    /* establish blind tunnel */
+    setup_blind_tunnel_port();
+
+    // Setting half close means we will send the FIN when we've written all of the data.
+    if (event == VC_EVENT_EOS) {
+      this->set_ua_half_close_flag();
+      t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
     }
+    return 0;
   }
 
   // Check to see if we are done parsing the header
@@ -1450,6 +1437,8 @@ HttpSM::state_api_callout(int event, void *data)
 void
 HttpSM::handle_api_return()
 {
+  HttpTunnelProducer *p = 0; // used as a scratch var in various cases.
+
   switch (t_state.api_next_action) {
   case HttpTransact::SM_ACTION_API_SM_START:
     if (t_state.client_info.port_attribute == HttpProxyPort::TRANSPORT_BLIND_TUNNEL) {
@@ -1496,12 +1485,11 @@ HttpSM::handle_api_return()
   }
 
   switch (t_state.next_action) {
-  case HttpTransact::SM_ACTION_TRANSFORM_READ: {
-    HttpTunnelProducer *p = setup_transfer_from_transform();
+  case HttpTransact::SM_ACTION_TRANSFORM_READ:
+    p = setup_transfer_from_transform();
     perform_transform_cache_write_action();
     tunnel.tunnel_run(p);
     break;
-  }
   case HttpTransact::SM_ACTION_SERVER_READ: {
     if (unlikely(t_state.did_upgrade_succeed)) {
       // We've sucessfully handled the upgrade, let's now setup
@@ -1512,14 +1500,30 @@ HttpSM::handle_api_return()
 
       setup_blind_tunnel(true);
     } else {
-      HttpTunnelProducer *p = setup_server_transfer();
-      perform_cache_write_action();
-      tunnel.tunnel_run(p);
+      if ((t_state.range_setup == HttpTransact::RANGE_PARTIAL_WRITE || t_state.range_setup == HttpTransact::RANGE_PARTIAL_UPDATE) &&
+          HttpTransact::CACHE_DO_WRITE == t_state.cache_info.action) {
+        Debug("amc", "Set up for partial read");
+        CacheVConnection *save_write_vc = cache_sm.cache_write_vc;
+        tunnel.tunnel_run(setup_server_transfer_to_cache_only());
+        t_state.next_action = HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ;
+        t_state.source = HttpTransact::SOURCE_CACHE;
+        HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_partial_read);
+        cache_sm.cache_write_vc = save_write_vc;
+        // Close the read VC if it's there because it's less work than trying to reset the existing
+        // one (which doesn't have the ODE attached).
+        cache_sm.close_read();
+        pending_action = cache_sm.open_partial_read(&t_state.hdr_info.client_request);
+        cache_sm.cache_write_vc = NULL;
+      } else {
+        p = setup_server_transfer();
+        perform_cache_write_action();
+        tunnel.tunnel_run(p);
+      }
     }
     break;
   }
   case HttpTransact::SM_ACTION_SERVE_FROM_CACHE: {
-    HttpTunnelProducer *p = setup_cache_read_transfer();
+    p = setup_cache_read_transfer();
     tunnel.tunnel_run(p);
     break;
   }
@@ -2355,7 +2359,6 @@ HttpSM::state_cache_open_write(int event, void *data)
     // The write vector was locked and the cache_sm retried
     // and got the read vector again.
     cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
-    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
     if (cache_sm.cache_read_vc->is_ram_cache_hit()) {
       t_state.cache_info.hit_miss_code = SQUID_HIT_RAM;
     } else {
@@ -2443,7 +2446,7 @@ HttpSM::state_cache_open_read(int event, void *data)
     t_state.source = HttpTransact::SOURCE_CACHE;
 
     cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
-    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
+    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
     if (cache_sm.cache_read_vc->is_ram_cache_hit()) {
       t_state.cache_info.hit_miss_code = SQUID_HIT_RAM;
     } else {
@@ -2481,6 +2484,62 @@ HttpSM::state_cache_open_read(int event, void *data)
   return 0;
 }
 
+//////////////////////////////////////////////////////////////////////////
+//
+//  HttpSM::state_cache_open_partial_read()
+//
+//  Handle the case where a partial request had a cache miss and we sent
+//  a request to the origin which has now come back successfully. We
+//  need to create a reader cache VC to handle the read side of the
+//  operation.
+//////////////////////////////////////////////////////////////////////////
+int
+HttpSM::state_cache_open_partial_read(int event, void *data)
+{
+  STATE_ENTER(&HttpSM::state_cache_open_partial_read, event);
+
+  //  ink_assert(NULL != cache_sm.cache_write_vc);
+  Debug("amc", "Handling partial read event");
+
+  switch (event) {
+  case CACHE_EVENT_OPEN_READ:
+    pending_action = NULL;
+
+    DebugSM("http", "[%" PRId64 "] cache_open_partial_read - CACHE_EVENT_OPEN_READ", sm_id);
+
+    ink_assert(cache_sm.cache_read_vc != NULL);
+
+    cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
+    ink_assert(t_state.cache_info.object_read != 0);
+    cache_sm.cache_read_vc->set_content_range(t_state.hdr_info.request_range);
+
+    t_state.next_action = HttpTransact::SM_ACTION_SERVE_FROM_CACHE;
+    t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
+
+    do_api_callout();
+    break;
+  case CACHE_EVENT_OPEN_READ_FAILED:
+    pending_action = NULL;
+
+    DebugSM("http", "[%" PRId64 "] cache_open_partial_read - "
+                    "CACHE_EVENT_OPEN_READ_FAILED",
+            sm_id);
+
+    // Need to do more here - mainly fall back to bypass from origin.
+    // Although we've got a serious problem if we don't open in this situation.
+    ink_assert(!"[amc] do something!");
+    break;
+
+
+  default:
+    // When the SM is in this state we've already started a tunnel running, so we have to handle
+    // that case here; unless it's an event of interest to this state, pass it on.
+    return this->tunnel_handler(event, data);
+  }
+
+  return 0;
+}
+
 int
 HttpSM::main_handler(int event, void *data)
 {
@@ -2747,6 +2806,9 @@ HttpSM::tunnel_handler(int event, void *data)
 {
   STATE_ENTER(&HttpSM::tunnel_handler, event);
 
+  if (CACHE_EVENT_OPEN_READ == event)
+    return 0;
+
   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
   ink_assert(data == &tunnel);
   // The tunnel calls this when it is done
@@ -2960,7 +3022,6 @@ HttpSM::tunnel_handler_server(int event, HttpTunnelProducer *p)
       ua_session->attach_server_session(server_session);
     } else {
       // Release the session back into the shared session pool
-      server_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->keep_alive_no_activity_timeout_out));
       server_session->release();
     }
   }
@@ -4062,6 +4123,10 @@ HttpSM::do_hostdb_update_if_necessary()
 void
 HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
 {
+  (void)field;
+  (void)content_length;
+  return;
+#if 0
   int prev_good_range = -1;
   const char *value;
   int value_len;
@@ -4109,7 +4174,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
   t_state.range_in_cache = true;
 
   for (; value; value = csv.get_next(&value_len)) {
-    if (!(tmp = (const char *)memchr(value, '-', value_len))) {
+    if (!(tmp = (const char *) memchr(value, '-', value_len))) {
       t_state.range_setup = HttpTransact::RANGE_NONE;
       goto Lfaild;
     }
@@ -4118,8 +4183,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     s = value;
     e = tmp;
     // skip leading white spaces
-    for (; s < e && ParseRules::is_ws(*s); ++s)
-      ;
+    for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
     if (s >= e)
       start = -1;
@@ -4127,8 +4191,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
       for (start = 0; s < e && *s >= '0' && *s <= '9'; ++s)
         start = start * 10 + (*s - '0');
       // skip last white spaces
-      for (; s < e && ParseRules::is_ws(*s); ++s)
-        ;
+      for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
       if (s < e || start < 0) {
         t_state.range_setup = HttpTransact::RANGE_NONE;
@@ -4140,8 +4203,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     s = tmp + 1;
     e = value + value_len;
     // skip leading white spaces
-    for (; s < e && ParseRules::is_ws(*s); ++s)
-      ;
+    for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
     if (s >= e) {
       if (start < 0) {
@@ -4156,8 +4218,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
       for (end = 0; s < e && *s >= '0' && *s <= '9'; ++s)
         end = end * 10 + (*s - '0');
       // skip last white spaces
-      for (; s < e && ParseRules::is_ws(*s); ++s)
-        ;
+      for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
       if (s < e || end < 0) {
         t_state.range_setup = HttpTransact::RANGE_NONE;
@@ -4195,9 +4256,10 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     ranges[nr]._end = end;
     ++nr;
 
-    if (!cache_sm.cache_read_vc->is_pread_capable() && cache_config_read_while_writer == 2) {
+#if 0
+    if (!cache_sm.cache_read_vc->is_pread_capable() && cache_config_read_while_writer==2) {
       // write in progress, check if request range not in cache yet
-      HTTPInfo::FragOffset *frag_offset_tbl = t_state.cache_info.object_read->get_frag_table();
+      HTTPInfo::FragOffset* frag_offset_tbl = t_state.cache_info.object_read->get_frag_table();
       int frag_offset_cnt = t_state.cache_info.object_read->get_frag_offset_count();
 
       if (!frag_offset_tbl || !frag_offset_cnt || (frag_offset_tbl[frag_offset_cnt - 1] < (uint64_t)end)) {
@@ -4205,6 +4267,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
         t_state.range_in_cache = false;
       }
     }
+#endif
   }
 
   if (nr > 0) {
@@ -4220,16 +4283,22 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
 Lfaild:
   t_state.range_in_cache = false;
   t_state.num_range_fields = -1;
-  delete[] ranges;
+  delete []ranges;
   return;
+#endif
 }
 
 void
-HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
+HttpSM::calculate_output_cl(int64_t content_length, int64_t num_chars)
 {
+#if 1
+  (void)content_length;
+  (void)num_chars;
+  return;
+#else
   int i;
 
-  if (t_state.range_setup != HttpTransact::RANGE_REQUESTED && t_state.range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
+  if (t_state.range_setup != HttpTransact::RANGE_REQUESTED)
     return;
 
   ink_assert(t_state.ranges);
@@ -4240,9 +4309,9 @@ HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
     for (i = 0; i < t_state.num_range_fields; i++) {
       if (t_state.ranges[i]._start >= 0) {
         t_state.range_output_cl += boundary_size;
-        t_state.range_output_cl += sub_header_size + num_chars_for_ct;
+        t_state.range_output_cl += sub_header_size + content_length;
         t_state.range_output_cl +=
-          num_chars_for_int(t_state.ranges[i]._start) + num_chars_for_int(t_state.ranges[i]._end) + num_chars_for_cl + 2;
+          num_chars_for_int(t_state.ranges[i]._start) + num_chars_for_int(t_state.ranges[i]._end) + num_chars + 2;
         t_state.range_output_cl += t_state.ranges[i]._end - t_state.ranges[i]._start + 1;
         t_state.range_output_cl += 2;
       }
@@ -4252,19 +4321,17 @@ HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
   }
 
   Debug("http_range", "Pre-calculated Content-Length for Range response is %" PRId64, t_state.range_output_cl);
+#endif
 }
 
 void
 HttpSM::do_range_parse(MIMEField *range_field)
 {
-  int num_chars_for_ct = 0;
-  t_state.cache_info.object_read->response_get()->value_get(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE, &num_chars_for_ct);
-
   int64_t content_length = t_state.cache_info.object_read->object_size_get();
   int64_t num_chars_for_cl = num_chars_for_int(content_length);
 
   parse_range_and_compare(range_field, content_length);
-  calculate_output_cl(num_chars_for_ct, num_chars_for_cl);
+  calculate_output_cl(content_length, num_chars_for_cl);
 }
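
The disabled calculate_output_cl() above has to pre-compute the Content-Length of a multipart/byteranges reply: per range it adds the boundary line, the part sub-header, and the part body plus the CRLF separators, then the closing boundary. A standalone (non-ATS) demonstration of the same arithmetic, done by building the body and measuring it, with a fixed boundary string:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct ByteRange {
  int64_t start;
  int64_t end; // inclusive, as in the Range header
};

// Build the multipart/byteranges body for the given ranges; the size() of the result is
// exactly the value that goes into the Content-Length header of the 206 response.
static std::string
build_multipart_body(const std::string &boundary, const std::string &content_type, int64_t total_size,
                     const std::vector<ByteRange> &ranges, const std::string &object)
{
  std::string body;
  char part_header[256];

  for (size_t i = 0; i < ranges.size(); ++i) {
    snprintf(part_header, sizeof(part_header),
             "--%s\r\nContent-Type: %s\r\nContent-Range: bytes %" PRId64 "-%" PRId64 "/%" PRId64 "\r\n\r\n",
             boundary.c_str(), content_type.c_str(), ranges[i].start, ranges[i].end, total_size);
    body += part_header;
    body.append(object, ranges[i].start, ranges[i].end - ranges[i].start + 1); // inclusive range
    body += "\r\n";
  }
  body += "--" + boundary + "--\r\n"; // closing boundary
  return body;
}

int
main()
{
  std::string object(1000, 'x'); // stand-in for the cached document
  std::vector<ByteRange> ranges = {{0, 99}, {500, 999}};
  std::string body = build_multipart_body("RANGE_SEPARATOR", "text/plain", 1000, ranges, object);

  printf("Content-Length: %zu\n", body.size());
  return 0;
}
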
 
 // this function looks for any Range: headers, parses them and either
@@ -4273,6 +4340,9 @@ HttpSM::do_range_parse(MIMEField *range_field)
 void
 HttpSM::do_range_setup_if_necessary()
 {
+#if 1
+  t_state.range_setup = HttpTransact::RANGE_NONE;
+#else
   MIMEField *field;
   INKVConnInternal *range_trans;
   int field_content_type_len = -1;
@@ -4310,6 +4380,7 @@ HttpSM::do_range_setup_if_necessary()
       }
     }
   }
+#endif
 }
 
 
@@ -5479,15 +5550,16 @@ HttpSM::perform_cache_write_action()
     break;
   }
 
-  case HttpTransact::CACHE_DO_WRITE:
-  case HttpTransact::CACHE_DO_REPLACE:
+  case HttpTransact::CACHE_DO_WRITE: {
     // Fix need to set up delete for after cache write has
     //   completed
+
     if (transform_info.entry == NULL || t_state.api_info.cache_untransformed == true) {
-      cache_sm.close_read();
       t_state.cache_info.write_status = HttpTransact::CACHE_WRITE_IN_PROGRESS;
       setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, client_response_hdr_bytes,
                                  "cache write");
+
+      cache_sm.close_read();
     } else {
       // We are not caching the untransformed.  We might want to
       //  use the cache writevc to cache the transformed copy
@@ -5496,7 +5568,7 @@ HttpSM::perform_cache_write_action()
       cache_sm.cache_write_vc = NULL;
     }
     break;
-
+  }
   default:
     ink_release_assert(0);
     break;
@@ -5719,7 +5791,8 @@ HttpSM::setup_cache_read_transfer()
 
   ink_assert(cache_sm.cache_read_vc != NULL);
 
-  doc_size = t_state.cache_info.object_read->object_size_get();
+  //  doc_size = t_state.cache_info.object_read->object_size_get();
+  doc_size = cache_sm.cache_read_vc->get_effective_content_size();
   alloc_index = buffer_size_to_index(doc_size + index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX));
 
 #ifndef USE_NEW_EMPTY_MIOBUFFER
@@ -5741,8 +5814,14 @@ HttpSM::setup_cache_read_transfer()
 
   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
 
-  if (doc_size != INT64_MAX)
+  if (doc_size != INT64_MAX) {
+    /* Brokenness - if the object was already in cache, @a doc_size is correct based on the range because the
+       CacheVC had a chance to do that on the way here, but if not then the read CacheVC isn't fully set up
+       and doesn't account for the range data, so we do it. That needs to be rationalized.
+    */
+    doc_size = t_state.hdr_info.request_range.calcContentLength(doc_size, 0);
     doc_size += hdr_size;
+  }
 
   HttpTunnelProducer *p = tunnel.add_producer(cache_sm.cache_read_vc, doc_size, buf_start, &HttpSM::tunnel_handler_cache_read,
                                               HT_CACHE_READ, "cache read");
@@ -5795,15 +5874,26 @@ void
 HttpSM::setup_cache_write_transfer(HttpCacheSM *c_sm, VConnection *source_vc, HTTPInfo *store_info, int64_t skip_bytes,
                                    const char *name)
 {
+  bool partial_update_p = HttpTransact::RANGE_PARTIAL_UPDATE == t_state.range_setup;
   ink_assert(c_sm->cache_write_vc != NULL);
   ink_assert(t_state.request_sent_time > 0);
   ink_assert(t_state.response_received_time > 0);
+  ink_assert(store_info->valid() || partial_update_p);
+
+  if (!partial_update_p) {
+    store_info->request_sent_time_set(t_state.request_sent_time);
+    store_info->response_received_time_set(t_state.response_received_time);
 
-  store_info->request_sent_time_set(t_state.request_sent_time);
-  store_info->response_received_time_set(t_state.response_received_time);
+    if (t_state.hdr_info.response_range.isValid() && t_state.hdr_info.response_content_size != HTTP_UNDEFINED_CL)
+      store_info->object_size_set(t_state.hdr_info.response_content_size);
+
+    c_sm->cache_write_vc->set_http_info(store_info);
+    store_info->clear();
+  }
+
+  if (t_state.hdr_info.response_range.isValid())
+    c_sm->cache_write_vc->set_inbound_range(t_state.hdr_info.response_range._min, t_state.hdr_info.response_range._max);
 
-  c_sm->cache_write_vc->set_http_info(store_info);
-  store_info->clear();
 
   tunnel.add_consumer(c_sm->cache_write_vc, source_vc, &HttpSM::tunnel_handler_cache_write, HT_CACHE_WRITE, name, skip_bytes);
 
@@ -6155,7 +6245,7 @@ HttpSM::setup_transfer_from_transform_to_cache_only()
   return p;
 }
 
-void
+HttpTunnelProducer *
 HttpSM::setup_server_transfer_to_cache_only()
 {
   TunnelChunkingAction_t action;
@@ -6183,6 +6273,7 @@ HttpSM::setup_server_transfer_to_cache_only()
   setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, 0, "cache write");
 
   server_entry->in_tunnel = true;
+  return p;
 }
 
 HttpTunnelProducer *
@@ -6874,15 +6965,7 @@ HttpSM::set_next_state()
   case HttpTransact::SM_ACTION_DNS_LOOKUP: {
     sockaddr const *addr;
 
-    if ((strncmp(t_state.dns_info.lookup_name, "127.0.0.1", 9) == 0 || strncmp(t_state.dns_info.lookup_name, "::1", 3) == 0) &&
-        ats_ip_pton(t_state.dns_info.lookup_name, t_state.host_db_info.ip()) == 0) {
-      // If it's 127.0.0.1 or ::1 don't bother with hostdb
-      DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup for %s because it's loopback",
-              t_state.dns_info.lookup_name);
-      t_state.dns_info.lookup_success = true;
-      call_transact_and_set_next_state(NULL);
-      break;
-    } else if (t_state.api_server_addr_set) {
+    if (t_state.api_server_addr_set) {
       /* If the API has set the server address before the OS DNS lookup
        * then we can skip the lookup
        */
@@ -7008,6 +7091,16 @@ HttpSM::set_next_state()
     break;
   }
 
+  case HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ: {
+#if 0
+      HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_partial_read);
+      t_state.source = HttpTransact::SOURCE_CACHE;
+      pending_action = cache_sm.open_partial_read();
+#endif
+    ink_assert(!"[amc] Shouldn't get here");
+    break;
+  }
+
   case HttpTransact::SM_ACTION_SERVER_READ: {
     t_state.source = HttpTransact::SOURCE_HTTP_ORIGIN_SERVER;
 
@@ -7501,9 +7594,6 @@ HttpSM::redirect_request(const char *redirect_url, const int redirect_len)
         // the client request didn't have a host, so use the current origin host
         DebugSM("http_redirect", "[HttpSM::redirect_request] keeping client request host %s://%s", next_hop_scheme, origHost);
         char *origHost1 = strtok_r(origHost, ":", &saveptr);
-        if (origHost1 == NULL) {
-          goto LhostError;
-        }
         origHost_len = strlen(origHost1);
         int origHostPort_len = origHost_len;
         char buf[origHostPort_len + 7];
@@ -7537,7 +7627,6 @@ HttpSM::redirect_request(const char *redirect_url, const int redirect_len)
         t_state.hdr_info.client_request.m_target_cached = false;
         clientUrl.scheme_set(scheme_str, scheme_len);
       } else {
-      LhostError:
         // the server request didn't have a host, so remove it from the headers
         t_state.hdr_info.client_request.field_delete(MIME_FIELD_HOST, MIME_LEN_HOST);
       }
@@ -7641,3 +7730,14 @@ HttpSM::is_redirect_required()
   }
   return redirect_required;
 }
+
+char const *
+HttpSM::handlerName(int (HttpSM::*ptm)(int, void *))
+{
+  char const *zret = "*method*";
+  if (ptm == &HttpSM::tunnel_handler)
+    zret = "tunnel_handler";
+  else if (ptm == &HttpSM::state_cache_open_partial_read)
+    zret = "state_cache_open_partial_read";
+  return zret;
+}

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpSM.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h
index 47f98eb..5221659 100644
--- a/proxy/http/HttpSM.h
+++ b/proxy/http/HttpSM.h
@@ -254,6 +254,7 @@ public:
   // Debugging routines to dump the SM history, hdrs
   void dump_state_on_assert();
   void dump_state_hdr(HTTPHdr *h, const char *s);
+  char const *handlerName(int (HttpSM::*ptm)(int, void *));
 
   // Functions for manipulating api hooks
   void txn_hook_append(TSHttpHookID id, INKContInternal *cont);
@@ -379,6 +380,7 @@ protected:
   // Cache Handlers
   int state_cache_open_read(int event, void *data);
   int state_cache_open_write(int event, void *data);
+  int state_cache_open_partial_read(int event, void *data);
 
   // Http Server Handlers
   int state_http_server_open(int event, void *data);
@@ -448,7 +450,7 @@ protected:
   void setup_server_send_request();
   void setup_server_send_request_api();
   HttpTunnelProducer *setup_server_transfer();
-  void setup_server_transfer_to_cache_only();
+  HttpTunnelProducer *setup_server_transfer_to_cache_only();
   HttpTunnelProducer *setup_cache_read_transfer();
   void setup_internal_transfer(HttpSMHandler handler);
   void setup_error_transfer();

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransact.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc
index f925775..5a56f9c 100644
--- a/proxy/http/HttpTransact.cc
+++ b/proxy/http/HttpTransact.cc
@@ -47,8 +47,12 @@
 #include "HttpClientSession.h"
 #include "I_Machine.h"
 
-static char range_type[] = "multipart/byteranges; boundary=RANGE_SEPARATOR";
-#define RANGE_NUMBERS_LENGTH 60
+static char const HTTP_RANGE_MULTIPART_CONTENT_TYPE[] = "multipart/byteranges; boundary=";
+
+/// If the initial uncached segment is less than this, expand the request to include the earliest fragment.
+/// Hardwired for now, this needs to be promoted to a config var at some point. It should also be a multiple
+/// of the fragment size.
+static int64_t const MIN_INITIAL_UNCACHED = 4 * (1 << 20);
 
 #define HTTP_INCREMENT_TRANS_STAT(X) update_stat(s, X, 1);
 #define HTTP_SUM_TRANS_STAT(X, S) update_stat(s, X, (ink_statval_t)S);
@@ -1799,7 +1803,7 @@ HttpTransact::OSDNSLookup(State *s)
     } else {
       if ((s->cache_info.action == CACHE_DO_NO_ACTION) &&
           (((s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE) && !s->txn_conf->cache_range_write) ||
-            s->range_setup == RANGE_NOT_SATISFIABLE || s->range_setup == RANGE_NOT_HANDLED))) {
+            s->range_setup == RANGE_NOT_SATISFIABLE))) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
       } else if (!s->txn_conf->cache_http || s->cache_lookup_result == HttpTransact::CACHE_LOOKUP_SKIPPED) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, LookupSkipOpenServer);
@@ -1807,7 +1811,7 @@ HttpTransact::OSDNSLookup(State *s)
         // from the DNS we need to call LookupSkipOpenServer
       } else if (s->cache_lookup_result == CACHE_LOOKUP_HIT_FRESH || s->cache_lookup_result == CACHE_LOOKUP_HIT_WARNING ||
                  s->cache_lookup_result == CACHE_LOOKUP_HIT_STALE) {
-        // DNS lookup is done if the content is state need to call handle cache open read hit
+        // DNS lookup is done; if the content is stale we need to call HandleCacheOpenReadHit
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadHit);
       } else if (s->cache_lookup_result == CACHE_LOOKUP_MISS || s->cache_info.action == CACHE_DO_NO_ACTION) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
@@ -2556,6 +2560,7 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
   bool needs_cache_auth = false;
   bool server_up = true;
   CacheHTTPInfo *obj;
+  HTTPRangeSpec range;
 
   if (s->api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_CONTINUE) {
     obj = &s->cache_info.object_store;
@@ -2730,6 +2735,31 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
       SET_VIA_STRING(VIA_DETAIL_CACHE_TYPE, VIA_DETAIL_CACHE);
     }
   }
+
+  // Check if we need to get some data from the origin.
+  if (s->state_machine->get_cache_sm().cache_read_vc->get_uncached(s->hdr_info.request_range, range, MIN_INITIAL_UNCACHED)) {
+    Debug("amc", "Request touches uncached fragments");
+    find_server_and_update_current_info(s);
+    if (!ats_is_ip(&s->current.server->addr)) {
+      if (s->current.request_to == PARENT_PROXY) {
+        TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup);
+      } else if (s->current.request_to == ORIGIN_SERVER) {
+        TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup);
+      } else {
+        ink_assert(!"[amc] - where was this going?");
+        return;
+      }
+    }
+    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->client_info.http_version, &range);
+    s->cache_info.action = CACHE_PREPARE_TO_WRITE;
+    s->range_setup = RANGE_PARTIAL_UPDATE;
+    s->next_action = how_to_open_connection(s);
+    if (s->stale_icp_lookup && s->next_action == SM_ACTION_ORIGIN_SERVER_OPEN) {
+      s->next_action = SM_ACTION_ICP_QUERY;
+    }
+    return;
+  }
+
   // cache hit, document is fresh, does not authorization,
   // is valid, etc. etc. send it back to the client.
   //
@@ -2850,7 +2880,7 @@ HttpTransact::build_response_from_cache(State *s, HTTPWarningCode warning_code)
       // send back the full document to the client.
       DebugTxn("http_trans", "[build_response_from_cache] Match! Serving full document.");
       s->cache_info.action = CACHE_DO_SERVE;
-
+#if 0
       // Check if cached response supports Range. If it does, append
       // Range transformation plugin
       // only if the cached response is a 200 OK
@@ -2885,6 +2915,10 @@ HttpTransact::build_response_from_cache(State *s, HTTPWarningCode warning_code)
         build_response(s, cached_response, &s->hdr_info.client_response, s->client_info.http_version);
       }
       s->next_action = SM_ACTION_SERVE_FROM_CACHE;
+#else
+      build_response(s, cached_response, &s->hdr_info.client_response, s->client_info.http_version);
+      s->next_action = SM_ACTION_SERVE_FROM_CACHE;
+#endif
     }
     // If the client request is a HEAD, then serve the header from cache.
     else if (s->method == HTTP_WKSIDX_HEAD) {
@@ -3072,12 +3106,16 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
   // We must, however, not cache the responses to these requests.
   if (does_method_require_cache_copy_deletion(s->http_config_param, s->method) && s->api_req_cacheable == false) {
     s->cache_info.action = CACHE_DO_NO_ACTION;
+#if 0
   } else if ((s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE) && !s->txn_conf->cache_range_write) ||
              does_method_effect_cache(s->method) == false || s->range_setup == RANGE_NOT_SATISFIABLE ||
              s->range_setup == RANGE_NOT_HANDLED) {
     s->cache_info.action = CACHE_DO_NO_ACTION;
+#endif
   } else {
     s->cache_info.action = CACHE_PREPARE_TO_WRITE;
+    if (s->hdr_info.request_range.hasRanges())
+      s->range_setup = RANGE_PARTIAL_WRITE;
   }
 
   // We should not issue an ICP lookup if the request has a
@@ -3119,8 +3157,8 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
         return;
       }
     }
-    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version);
-
+    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version,
+                  &s->hdr_info.request_range);
     s->next_action = how_to_open_connection(s);
   } else { // miss, but only-if-cached is set
     build_error_response(s, HTTP_STATUS_GATEWAY_TIMEOUT, "Not Cached", "cache#not_in_cache", NULL);
@@ -4102,6 +4140,7 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
   HTTPStatus client_response_code = HTTP_STATUS_NONE;
   const char *warn_text = NULL;
   bool cacheable = false;
+  HTTPRangeSpec ranges;
 
   cacheable = is_response_cacheable(s, &s->hdr_info.client_request, &s->hdr_info.server_response);
   DebugTxn("http_trans", "[hcoofsr] response %s cacheable", cacheable ? "is" : "is not");
@@ -4317,7 +4356,12 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     }
 
     s->next_action = SM_ACTION_SERVER_READ;
-    client_response_code = server_response_code;
+    // If we got back 206 but the original request wasn't partial, then we're doing a partial update and need to return 200.
+    // Need to strip Content-Range at some point as well.
+    if (HTTP_STATUS_PARTIAL_CONTENT == server_response_code && s->hdr_info.request_range.isEmpty())
+      client_response_code = HTTP_STATUS_OK;
+    else
+      client_response_code = server_response_code;
     base_response = &s->hdr_info.server_response;
 
     s->negative_caching = is_negative_caching_appropriate(s) && cacheable;
@@ -4423,6 +4467,19 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     break;
   }
 
+
+#if 0
+  /* If we plan to do a write and the request was partial, then we need to open a
+     cache read to service the request and not just pass through.
+  */
+  if (SM_ACTION_SERVER_READ == s->next_action &&
+      CACHE_DO_WRITE == s->cache_info.action &&
+      s->hdr_info.request_range.hasRanges()
+  ) {
+    s->next_action = SM_ACTION_CACHE_OPEN_PARTIAL_READ;
+  }
+#endif
+
   // update stat, set via string, etc
 
   switch (s->cache_info.action) {
@@ -4481,7 +4538,9 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
   }
   ink_assert(base_response->valid());
 
-  if ((s->cache_info.action == CACHE_DO_WRITE) || (s->cache_info.action == CACHE_DO_REPLACE)) {
+  if (((s->cache_info.action == CACHE_DO_WRITE) || (s->cache_info.action == CACHE_DO_REPLACE)) &&
+      s->range_setup != RANGE_PARTIAL_UPDATE) {
+    // If it's a partial write then we already have the cached headers, no need to pass these in.
     set_headers_for_cache_write(s, &s->cache_info.object_store, &s->hdr_info.server_request, &s->hdr_info.server_response);
   }
   // 304, 412, and 416 responses are handled here
@@ -4525,6 +4584,9 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     if (((s->next_action == SM_ACTION_SERVE_FROM_CACHE) || (s->next_action == SM_ACTION_SERVER_READ)) &&
         s->state_machine->do_transform_open()) {
       set_header_for_transform(s, base_response);
+    } else if (s->hdr_info.request_range.isEmpty() && s->cache_info.object_read->valid()) {
+      build_response(s, s->cache_info.object_read->response_get(), &s->hdr_info.client_response, s->client_info.http_version);
+      s->hdr_info.client_response.set_content_length(s->cache_info.object_read->object_size_get());
     } else {
       build_response(s, base_response, &s->hdr_info.client_response, s->client_info.http_version, client_response_code);
     }
@@ -4831,6 +4893,12 @@ HttpTransact::set_headers_for_cache_write(State *s, HTTPInfo *cache_info, HTTPHd
   cache_info->request_get()->field_delete(MIME_FIELD_VIA, MIME_LEN_VIA);
   // server 200 Ok for Range request
   cache_info->request_get()->field_delete(MIME_FIELD_RANGE, MIME_LEN_RANGE);
+  if (NULL != cache_info->response_get()->field_find(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE)) {
+    cache_info->response_get()->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    cache_info->response_get()->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
+    cache_info->response_get()->status_set(HTTP_STATUS_OK);
+    cache_info->response_get()->reason_set(http_hdr_reason_lookup(HTTP_STATUS_OK), strlen(http_hdr_reason_lookup(HTTP_STATUS_OK)));
+  }
 
   // If we're ignoring auth, then we don't want to cache WWW-Auth
   //  headers
@@ -5205,6 +5273,8 @@ HttpTransact::add_client_ip_to_outgoing_request(State *s, HTTPHdr *request)
 HttpTransact::RequestError_t
 HttpTransact::check_request_validity(State *s, HTTPHdr *incoming_hdr)
 {
+  MIMEField *f; // temp for field checks.
+
   if (incoming_hdr == 0) {
     return NON_EXISTANT_REQUEST_HEADER;
   }
@@ -5324,6 +5394,14 @@ HttpTransact::check_request_validity(State *s, HTTPHdr *incoming_hdr)
     }
   }
 
+  if (0 != (f = incoming_hdr->field_find(MIME_FIELD_RANGE, MIME_LEN_RANGE))) {
+    int len;
+    char const *val = f->value_get(&len);
+    if (!s->hdr_info.request_range.parseRangeFieldValue(val, len))
+      return INVALID_RANGE_FIELD;
+  }
+
+
   return NO_REQUEST_HEADER_ERROR;
 }
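
check_request_validity() above now rejects a request whose Range value fails HTTPRangeSpec::parseRangeFieldValue() with INVALID_RANGE_FIELD. A standalone (non-ATS) sketch of the kind of validation that implies; this is not the HTTPRangeSpec implementation, and whitespace handling plus overflow checks are omitted.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

struct RawRange {
  int64_t start; // -1 if the spec had no first-byte-pos (suffix range such as "-500")
  int64_t end;   // -1 if the spec had no last-byte-pos (open range such as "9500-")
};

// Accepts a value such as "bytes=0-499,1000-1499,9500-". Returns false on syntax errors,
// which is what lets the request be answered with 400 instead of being passed through.
static bool
parse_range_value(const std::string &value, std::vector<RawRange> &out)
{
  size_t eq = value.find('=');
  if (eq == std::string::npos || value.compare(0, eq, "bytes") != 0)
    return false; // "bytes" is the only range unit defined for HTTP

  size_t pos = eq + 1;
  while (pos <= value.size()) {
    size_t comma = value.find(',', pos);
    std::string spec = value.substr(pos, comma == std::string::npos ? std::string::npos : comma - pos);
    size_t dash = spec.find('-');
    if (dash == std::string::npos)
      return false; // every byte-range-spec needs a '-'

    std::string first = spec.substr(0, dash);
    std::string last = spec.substr(dash + 1);
    RawRange r = {-1, -1};
    if (!first.empty())
      r.start = strtoll(first.c_str(), NULL, 10);
    if (!last.empty())
      r.end = strtoll(last.c_str(), NULL, 10);
    if (r.start < 0 && r.end < 0)
      return false; // "-" alone is not a valid spec
    if (r.start >= 0 && r.end >= 0 && r.end < r.start)
      return false; // descending ranges are invalid
    out.push_back(r);

    if (comma == std::string::npos)
      break;
    pos = comma + 1;
  }
  return !out.empty();
}

int
main()
{
  std::vector<RawRange> ranges;
  if (parse_range_value("bytes=0-499,1000-1499,9500-", ranges))
    for (size_t i = 0; i < ranges.size(); ++i)
      printf("range %zu: %lld .. %lld\n", i, (long long)ranges[i].start, (long long)ranges[i].end);
  return 0;
}
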
 
@@ -5669,6 +5747,8 @@ HttpTransact::initialize_state_variables_from_request(State *s, HTTPHdr *obsolet
 void
 HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incoming_response)
 {
+  MIMEField *field;
+
   /* check if the server permits caching */
   s->cache_info.directives.does_server_permit_storing =
     HttpTransactHeaders::does_server_allow_response_to_be_stored(&s->hdr_info.server_response);
@@ -5708,8 +5788,7 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
     // This code used to discriminate CL: headers when the origin disabled keep-alive.
     if (incoming_response->presence(MIME_PRESENCE_CONTENT_LENGTH)) {
       int64_t cl = incoming_response->get_content_length();
-
-      s->hdr_info.response_content_length = (cl >= 0) ? cl : HTTP_UNDEFINED_CL;
+      s->hdr_info.response_content_length = cl < 0 ? HTTP_UNDEFINED_CL : cl;
       s->hdr_info.trust_response_cl = true;
     } else {
       s->hdr_info.response_content_length = HTTP_UNDEFINED_CL;
@@ -5718,8 +5797,7 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
   }
 
   if (incoming_response->presence(MIME_PRESENCE_TRANSFER_ENCODING)) {
-    MIMEField *field = incoming_response->field_find(MIME_FIELD_TRANSFER_ENCODING, MIME_LEN_TRANSFER_ENCODING);
-    ink_assert(field != NULL);
+    field = incoming_response->field_find(MIME_FIELD_TRANSFER_ENCODING, MIME_LEN_TRANSFER_ENCODING);
 
     HdrCsvIter enc_val_iter;
     int enc_val_len;
@@ -5780,6 +5858,15 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
     }
   }
 
+  // Get the incoming range to store from the origin.
+  if (NULL != (field = incoming_response->field_find(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE))) {
+    int len;
+    char const *cr = field->value_get(&len);
+    s->hdr_info.response_content_size =
+      HTTPRangeSpec::parseContentRangeFieldValue(cr, len, s->hdr_info.response_range, s->hdr_info.response_range_boundary);
+  }
+
+
   s->current.server->transfer_encoding = NO_TRANSFER_ENCODING;
 }
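
The hunk above records the origin's Content-Range so the cache knows which part of the object the response body covers. For reference, a standalone (non-ATS) sketch of that header's syntax and the basic validity checks; HTTPRangeSpec::parseContentRangeFieldValue() is the real parser and also fills in s->hdr_info.response_range_boundary, which is not modeled here.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Parse "bytes <first>-<last>/<total>" or "bytes <first>-<last>/*" (unknown total length).
// Returns false for the unsatisfied-range form "bytes */<total>" and for syntax errors,
// which is roughly the situation is_response_cacheable() treats as uncacheable below.
static bool
parse_content_range(const char *value, int64_t *first, int64_t *last, int64_t *total)
{
  if (sscanf(value, "bytes %" SCNd64 "-%" SCNd64 "/%" SCNd64, first, last, total) == 3)
    return *first <= *last && *last < *total;
  if (sscanf(value, "bytes %" SCNd64 "-%" SCNd64 "/*", first, last) == 2) {
    *total = -1; // complete length unknown
    return *first <= *last;
  }
  return false;
}

int
main()
{
  int64_t first, last, total;
  if (parse_content_range("bytes 500-999/8000", &first, &last, &total))
    printf("store bytes %" PRId64 "-%" PRId64 " of a %" PRId64 " byte object\n", first, last, total);
  return 0;
}
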
 
@@ -6087,9 +6174,6 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
                            "request is not cache lookupable, response is not cachable");
     return false;
   }
-  // already has a fresh copy in the cache
-  if (s->range_setup == RANGE_NOT_HANDLED)
-    return false;
 
   // Check whether the response is cachable based on its cookie
   // If there are cookies in response but a ttl is set, allow caching
@@ -6177,11 +6261,20 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
     }
   }
   // do not cache partial content - Range response
-  if (response_code == HTTP_STATUS_PARTIAL_CONTENT || response_code == HTTP_STATUS_RANGE_NOT_SATISFIABLE) {
+  if (response_code == HTTP_STATUS_RANGE_NOT_SATISFIABLE) {
     DebugTxn("http_trans", "[is_response_cacheable] "
                            "response code %d - don't cache",
              response_code);
     return false;
+  } else if (response->presence(MIME_PRESENCE_CONTENT_RANGE) && !s->hdr_info.response_range.isValid()) {
+    if (0 <= s->hdr_info.response_content_size) {
+      DebugTxn("http_trans", "[is_response_cacheable] "
+                             "Content-Range header present with unsatisfiable range");
+    } else {
+      DebugTxn("http_trans", "[is_response_cacheable] "
+                             "Content-Range header present but unparsable");
+    }
+    return false;
   }
 
   // check if cache control overrides default cacheability
@@ -6242,8 +6335,9 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
   // default cacheability
   if (!s->txn_conf->negative_caching_enabled) {
     if ((response_code == HTTP_STATUS_OK) || (response_code == HTTP_STATUS_NOT_MODIFIED) ||
-        (response_code == HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION) || (response_code == HTTP_STATUS_MOVED_PERMANENTLY) ||
-        (response_code == HTTP_STATUS_MULTIPLE_CHOICES) || (response_code == HTTP_STATUS_GONE)) {
+        (response_code == HTTP_STATUS_PARTIAL_CONTENT) || (response_code == HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION) ||
+        (response_code == HTTP_STATUS_MOVED_PERMANENTLY) || (response_code == HTTP_STATUS_MULTIPLE_CHOICES) ||
+        (response_code == HTTP_STATUS_GONE)) {
       DebugTxn("http_trans", "[is_response_cacheable] YES by default ");
       return true;
     } else {
@@ -6380,6 +6474,11 @@ HttpTransact::is_request_valid(State *s, HTTPHdr *incoming_request)
     build_error_response(s, HTTP_STATUS_BAD_REQUEST, "Invalid Content Length", "request#invalid_content_length", NULL);
     return false;
   }
+  case INVALID_RANGE_FIELD: {
+    DebugTxn("http_trans", "[is_request_valid] a Range field was present with an invalid range specification");
+    SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD);
+    build_error_response(s, HTTP_STATUS_BAD_REQUEST, "Invalid Range", "request#syntax_error", NULL);
+  }
   default:
     return true;
   }
@@ -6652,6 +6751,7 @@ void
 HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *base)
 {
   int64_t cl = HTTP_UNDEFINED_CL;
+
   ink_assert(header->type_get() == HTTP_TYPE_RESPONSE);
   if (base->presence(MIME_PRESENCE_CONTENT_LENGTH)) {
     cl = base->get_content_length();
@@ -6663,13 +6763,16 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
       case SOURCE_HTTP_ORIGIN_SERVER:
         // We made our decision about whether to trust the
         //   response content length in init_state_vars_from_response()
-        if (s->range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
-          break;
+        if (s->hdr_info.request_range.hasRanges()) {
+          change_response_header_because_of_range_request(s, header);
+          s->hdr_info.trust_response_cl = true;
+        }
+        break;
 
       case SOURCE_CACHE:
         // if we are doing a single Range: request, calculate the new
         // C-L: header
-        if (s->range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
+        if (s->hdr_info.request_range.hasRanges()) {
           change_response_header_because_of_range_request(s, header);
           s->hdr_info.trust_response_cl = true;
         }
@@ -6689,7 +6792,7 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         break;
 
       case SOURCE_TRANSFORM:
-        if (s->range_setup == HttpTransact::RANGE_REQUESTED) {
+        if (s->hdr_info.request_range.hasRanges()) {
           header->set_content_length(s->range_output_cl);
           s->hdr_info.trust_response_cl = true;
         } else if (s->hdr_info.transform_response_cl == HTTP_UNDEFINED_CL) {
@@ -6722,7 +6825,7 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         s->hdr_info.trust_response_cl = false;
         s->hdr_info.request_content_length = HTTP_UNDEFINED_CL;
         ink_assert(s->range_setup == RANGE_NONE);
-      } else if (s->range_setup == RANGE_NOT_TRANSFORM_REQUESTED) {
+      } else if (s->hdr_info.response_range.isValid()) {
         // if we are doing a single Range: request, calculate the new
         // C-L: header
         change_response_header_because_of_range_request(s, header);
@@ -6744,7 +6847,6 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         s->hdr_info.trust_response_cl = false;
       }
       header->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
-      ink_assert(s->range_setup != RANGE_NOT_TRANSFORM_REQUESTED);
     }
   }
   return;
@@ -7691,7 +7793,8 @@ HttpTransact::is_request_likely_cacheable(State *s, HTTPHdr *request)
 }
 
 void
-HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version)
+HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version,
+                            HTTPRangeSpec const *ranges)
 {
   // this part is to restore the original URL in case, multiple cache
   // lookups have happened - client request has been changed as the result
@@ -7718,6 +7821,8 @@ HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_r
   HttpTransactHeaders::remove_privacy_headers_from_request(s->http_config_param, s->txn_conf, outgoing_request);
   HttpTransactHeaders::add_global_user_agent_header_to_request(s->txn_conf, outgoing_request);
   handle_request_keep_alive_headers(s, outgoing_version, outgoing_request);
+  if (ranges)
+    HttpTransactHeaders::insert_request_range_header(outgoing_request, ranges);
 
   // handle_conditional_headers appears to be obsolete.  Nothing happens
  // unless s->cache_info.action == HttpTransact::CACHE_DO_UPDATE.  In that
@@ -7848,7 +7953,8 @@ HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing
   if (base_response == NULL) {
     HttpTransactHeaders::build_base_response(outgoing_response, status_code, reason_phrase, strlen(reason_phrase), s->current.now);
   } else {
-    if ((status_code == HTTP_STATUS_NONE) || (status_code == base_response->status_get())) {
+    if ((status_code == HTTP_STATUS_NONE) || (status_code == base_response->status_get()) ||
+        (HTTP_STATUS_OK == status_code && HTTP_STATUS_PARTIAL_CONTENT == base_response->status_get())) {
       HttpTransactHeaders::copy_header_fields(base_response, outgoing_response, s->txn_conf->fwd_proxy_auth_to_parent);
 
       if (s->txn_conf->insert_age_in_response)
@@ -7862,6 +7968,7 @@ HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing
       //  before processing the keep_alive headers
       //
       handle_content_length_header(s, outgoing_response, base_response);
+
     } else
       switch (status_code) {
       case HTTP_STATUS_NOT_MODIFIED:
@@ -9005,8 +9112,12 @@ HttpTransact::delete_warning_value(HTTPHdr *to_warn, HTTPWarningCode warning_cod
 void
 HttpTransact::change_response_header_because_of_range_request(State *s, HTTPHdr *header)
 {
-  MIMEField *field;
+  MIMEField *field = header->field_find(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
   char *reason_phrase;
+  //  CacheVConnection* cache_read_vc = s->state_machine->get_cache_sm().cache_read_vc;
+  //  HTTPHdr* cached_response = find_appropriate_cached_resp(s);
+  //  HTTPRangeSpec& rs = cache_read_vc->get_http_range_spec();
+  HTTPRangeSpec &rs = s->state_machine->t_state.hdr_info.request_range;
 
   Debug("http_trans", "Partial content requested, re-calculating content-length");
 
@@ -9015,36 +9126,34 @@ HttpTransact::change_response_header_because_of_range_request(State *s, HTTPHdr
   header->reason_set(reason_phrase, strlen(reason_phrase));
 
   // set the right Content-Type for multiple entry Range
-  if (s->num_range_fields > 1) {
-    field = header->field_find(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
+  if (rs.isMulti()) { // means we need a boundary string.
+    ink_release_assert(!"[amc] Computation of boundary string not working correctly");
+#if 0
+    int rbs_len;
+    char const* rbs = cache_read_vc->get_http_range_boundary_string(&rbs_len);
+    char buff[(sizeof(HTTP_RANGE_MULTIPART_CONTENT_TYPE)-1) + HTTP_RANGE_BOUNDARY_LEN];
 
     if (field != NULL)
       header->field_delete(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
 
     field = header->field_create(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
-    field->value_append(header->m_heap, header->m_mime, range_type, sizeof(range_type) - 1);
+    snprintf(buff, sizeof(buff), "%s%.*s", HTTP_RANGE_MULTIPART_CONTENT_TYPE, rbs_len, rbs);
+    field->value_append(header->m_heap, header->m_mime, buff, sizeof(buff));
 
     header->field_attach(field);
-    // TODO: There's a known bug here where the Content-Length is not correct for multi-part
-    // Range: requests.
-    header->set_content_length(s->range_output_cl);
-  } else {
-    if (s->cache_info.object_read && s->cache_info.object_read->valid()) {
-      // TODO: It's unclear under which conditions we need to update the Content-Range: header,
-      // many times it's already set correctly before calling this. For now, always try do it
-      // when we have the information for it available.
-      // TODO: Also, it's unclear as to why object_read->valid() is not always true here.
-      char numbers[RANGE_NUMBERS_LENGTH];
-      header->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
-      field = header->field_create(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
-      snprintf(numbers, sizeof(numbers), "bytes %" PRId64 "-%" PRId64 "/%" PRId64, s->ranges[0]._start, s->ranges[0]._end,
-               s->cache_info.object_read->object_size_get());
-      field->value_set(header->m_heap, header->m_mime, numbers, strlen(numbers));
-      header->field_attach(field);
-    }
-    // Always update the Content-Length: header.
-    header->set_content_length(s->range_output_cl);
+#endif
+  } else if (rs.isSingle()) {
+    int n;
+    char buff[HTTP_LEN_BYTES + (18 + 1) * 3];
+    header->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    field = header->field_create(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    n = snprintf(buff, sizeof(buff), "%s %" PRIu64 "-%" PRIu64 "/%" PRId64, HTTP_VALUE_BYTES, rs[0]._min, rs[0]._max,
+                 s->state_machine->t_state.hdr_info.response_content_size);
+    field->value_set(header->m_heap, header->m_mime, buff, n);
+    header->field_attach(field);
+    header->set_content_length(rs.size());
   }
+  //  header->set_content_length(cache_read_vc->get_effective_content_size());
 }
 
 #if TS_HAS_TESTS
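
The 206 header values produced in change_response_header_because_of_range_request() follow RFC 7233: a single-range reply carries "Content-Range: bytes <first>-<last>/<complete-length>", while a multi-range reply is meant to switch the Content-Type to a multipart type with a boundary string (the path that is still asserted out above). A sketch of just the formatting, with illustrative helper names rather than the ATS functions:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // "Content-Range: bytes <first>-<last>/<complete-length>" for a single range.
    static int
    format_content_range(char *buf, size_t len, uint64_t first, uint64_t last, int64_t complete_length)
    {
      return snprintf(buf, len, "bytes %" PRIu64 "-%" PRIu64 "/%" PRId64, first, last, complete_length);
    }

    // For several ranges the response Content-Type instead names a multipart
    // type and a boundary; each body part then repeats its own Content-Type
    // and Content-Range headers.
    static int
    format_multipart_content_type(char *buf, size_t len, char const *boundary, int boundary_len)
    {
      return snprintf(buf, len, "multipart/byteranges; boundary=%.*s", boundary_len, boundary);
    }

For example, "Range: bytes=0-99" against a 1000-byte object yields "Content-Range: bytes 0-99/1000" and a Content-Length of 100.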

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransact.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h
index fa64940..6d53231 100644
--- a/proxy/http/HttpTransact.h
+++ b/proxy/http/HttpTransact.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #if !defined(_HttpTransact_h_)
 #define _HttpTransact_h_
 
@@ -53,44 +52,37 @@
 #define ACQUIRE_PRINT_LOCK() // ink_mutex_acquire(&print_lock);
 #define RELEASE_PRINT_LOCK() // ink_mutex_release(&print_lock);
 
-#define DUMP_HEADER(T, H, I, S)                                 \
-  {                                                             \
-    if (diags->on(T)) {                                         \
-      ACQUIRE_PRINT_LOCK()                                      \
-      fprintf(stderr, "+++++++++ %s +++++++++\n", S);           \
-      fprintf(stderr, "-- State Machine Id: %" PRId64 "\n", I); \
-      char b[4096];                                             \
-      int used, tmp, offset;                                    \
-      int done;                                                 \
-      offset = 0;                                               \
-      if ((H)->valid()) {                                       \
-        do {                                                    \
-          used = 0;                                             \
-          tmp = offset;                                         \
-          done = (H)->print(b, 4095, &used, &tmp);              \
-          offset += used;                                       \
-          b[used] = '\0';                                       \
-          fprintf(stderr, "%s", b);                             \
-        } while (!done);                                        \
-      }                                                         \
-      RELEASE_PRINT_LOCK()                                      \
-    }                                                           \
+#define DUMP_HEADER(T, H, I, S)                                            \
+  {                                                                        \
+    if (diags->on(T)) {                                                    \
+      ACQUIRE_PRINT_LOCK() fprintf(stderr, "+++++++++ %s +++++++++\n", S); \
+      fprintf(stderr, "-- State Machine Id: %" PRId64 "\n", I);            \
+      char b[4096];                                                        \
+      int used, tmp, offset;                                               \
+      int done;                                                            \
+      offset = 0;                                                          \
+      if ((H)->valid()) {                                                  \
+        do {                                                               \
+          used = 0;                                                        \
+          tmp = offset;                                                    \
+          done = (H)->print(b, 4095, &used, &tmp);                         \
+          offset += used;                                                  \
+          b[used] = '\0';                                                  \
+          fprintf(stderr, "%s", b);                                        \
+        } while (!done);                                                   \
+      }                                                                    \
+      RELEASE_PRINT_LOCK()                                                 \
+    }                                                                      \
   }
 
-
 #define TRANSACT_SETUP_RETURN(n, r) \
   s->next_action = n;               \
   s->transact_return_point = r;     \
   DebugSpecific((s->state_machine && s->state_machine->debug_on), "http_trans", "Next action %s; %s", #n, #r);
 
-#define TRANSACT_RETURN(n, r) \
-  TRANSACT_SETUP_RETURN(n, r) \
-  return;
-
-#define TRANSACT_RETURN_VAL(n, r, v) \
-  TRANSACT_SETUP_RETURN(n, r)        \
-  return v;
+#define TRANSACT_RETURN(n, r) TRANSACT_SETUP_RETURN(n, r) return;
 
+#define TRANSACT_RETURN_VAL(n, r, v) TRANSACT_SETUP_RETURN(n, r) return v;
 
 #define SET_UNPREPARE_CACHE_ACTION(C)                               \
   {                                                                 \
@@ -377,6 +369,7 @@ public:
     SCHEME_NOT_SUPPORTED,
     UNACCEPTABLE_TE_REQUIRED,
     INVALID_POST_CONTENT_LENGTH,
+    INVALID_RANGE_FIELD,
     TOTAL_REQUEST_ERROR_TYPES
   };
 
@@ -446,22 +439,18 @@ public:
     // SM_ACTION_AUTH_LOOKUP,
     SM_ACTION_DNS_LOOKUP,
     SM_ACTION_DNS_REVERSE_LOOKUP,
-
     SM_ACTION_CACHE_LOOKUP,
     SM_ACTION_CACHE_ISSUE_WRITE,
     SM_ACTION_CACHE_ISSUE_WRITE_TRANSFORM,
     SM_ACTION_CACHE_PREPARE_UPDATE,
     SM_ACTION_CACHE_ISSUE_UPDATE,
-
+    SM_ACTION_CACHE_OPEN_PARTIAL_READ,
     SM_ACTION_ICP_QUERY,
-
     SM_ACTION_ORIGIN_SERVER_OPEN,
     SM_ACTION_ORIGIN_SERVER_RAW_OPEN,
     SM_ACTION_ORIGIN_SERVER_RR_MARK_DOWN,
-
     SM_ACTION_READ_PUSH_HDR,
     SM_ACTION_STORE_PUSH_BODY,
-
     SM_ACTION_INTERNAL_CACHE_DELETE,
     SM_ACTION_INTERNAL_CACHE_NOOP,
     SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS,
@@ -473,14 +462,12 @@ public:
 #ifdef PROXY_DRAIN
     SM_ACTION_DRAIN_REQUEST_BODY,
 #endif /* PROXY_DRAIN */
-
     SM_ACTION_SERVE_FROM_CACHE,
     SM_ACTION_SERVER_READ,
     SM_ACTION_SERVER_PARSE_NEXT_HDR,
     SM_ACTION_TRANSFORM_READ,
     SM_ACTION_SSL_TUNNEL,
     SM_ACTION_CONTINUE,
-
     SM_ACTION_API_SM_START,
     SM_ACTION_API_READ_REQUEST_HDR,
     SM_ACTION_API_PRE_REMAP,
@@ -492,7 +479,6 @@ public:
     SM_ACTION_API_READ_RESPONSE_HDR,
     SM_ACTION_API_SEND_RESPONSE_HDR,
     SM_ACTION_API_SM_SHUTDOWN,
-
     SM_ACTION_REMAP_REQUEST,
     SM_ACTION_POST_REMAP_SKIP,
     SM_ACTION_REDIRECT_READ
@@ -544,10 +530,9 @@ public:
 
   enum RangeSetup_t {
     RANGE_NONE = 0,
-    RANGE_REQUESTED,
     RANGE_NOT_SATISFIABLE,
-    RANGE_NOT_HANDLED,
-    RANGE_NOT_TRANSFORM_REQUESTED,
+    RANGE_PARTIAL_WRITE,  ///< Cache a range request.
+    RANGE_PARTIAL_UPDATE, ///< Update an existing object with a range request.
   };
 
   enum CacheAuth_t {
@@ -760,9 +745,15 @@ public:
     HTTPHdr transform_response;
     HTTPHdr cache_response;
     int64_t request_content_length;
-    int64_t response_content_length;
+    int64_t response_content_length; // Length of the payload (Content-Length
+                                     // field)
+    int64_t response_content_size;   // Total size of the object on the origin
+                                     // server.
     int64_t transform_request_cl;
     int64_t transform_response_cl;
+    HTTPRangeSpec request_range;
+    HTTPRangeSpec::Range response_range;
+    ts::ConstBuffer response_range_boundary; // not used yet
     bool client_req_is_server_style;
     bool trust_response_cl;
     ResponseError_t response_error;
@@ -786,7 +777,6 @@ public:
     _SquidLogInfo() : log_code(SQUID_LOG_ERR_UNKNOWN), hier_code(SQUID_HIER_EMPTY), hit_miss_code(SQUID_MISS_NONE) {}
   } SquidLogInfo;
 
-
 #define HTTP_TRANSACT_STATE_MAX_XBUF_SIZE (1024 * 2) /* max size of plugin exchange buffer */
 
   struct State {
@@ -842,7 +832,8 @@ public:
     StateMachineAction_t api_next_action;                  // out
     void (*transact_return_point)(HttpTransact::State *s); // out
 
-    // We keep this so we can jump back to the upgrade handler after remap is complete
+    // We keep this so we can jump back to the upgrade handler after remap is
+    // complete
     bool is_upgrade_request;
     void (*post_remap_upgrade_return_point)(HttpTransact::State *s); // out
     const char *upgrade_token_wks;
@@ -903,7 +894,8 @@ public:
     int api_txn_no_activity_timeout_value;
 
     // Used by INKHttpTxnCachedReqGet and INKHttpTxnCachedRespGet SDK functions
-    // to copy part of HdrHeap (only the writable portion) for cached response headers
+    // to copy part of HdrHeap (only the writable portion) for cached response
+    // headers
     // and request headers
     // These ptrs are deallocated when the transaction is over.
     HdrHeapSDKHandle *cache_req_hdr_heap_handle;
@@ -951,7 +943,8 @@ public:
     RangeRecord *ranges;
 
     OverridableHttpConfigParams *txn_conf;
-    OverridableHttpConfigParams my_txn_conf; // Storage for plugins, to avoid malloc
+    OverridableHttpConfigParams my_txn_conf; // Storage for plugins, to avoid
+                                             // malloc
 
     bool transparent_passthrough;
     bool range_in_cache;
@@ -1218,7 +1211,8 @@ public:
   static bool will_this_request_self_loop(State *s);
   static bool is_request_likely_cacheable(State *s, HTTPHdr *request);
 
-  static void build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version);
+  static void build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version,
+                            HTTPRangeSpec const *ranges = 0);
   static void build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing_response, HTTPVersion outgoing_version,
                              HTTPStatus status_code, const char *reason_phrase = NULL);
   static void build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing_response, HTTPVersion outgoing_version);
@@ -1260,7 +1254,8 @@ public:
   static void client_result_stat(State *s, ink_hrtime total_time, ink_hrtime request_process_time);
   static void add_new_stat_block(State *s);
   static void delete_warning_value(HTTPHdr *to_warn, HTTPWarningCode warning_code);
-  static bool is_connection_collapse_checks_success(State *s); // YTS Team, yamsat
+  static bool is_connection_collapse_checks_success(State *s); // YTS Team,
+                                                               // yamsat
 };
 
 typedef void (*TransactEntryFunc_t)(HttpTransact::State *s);
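
With the trailing parameter added to build_request() above, a caller that needs a partial fill can hand in the stored range specification, and build_request() then emits the upstream Range header via HttpTransactHeaders::insert_request_range_header(); existing callers are untouched because the parameter defaults to 0. A hypothetical call site, shown only to illustrate the signature (the real state-machine wiring is not part of this excerpt):

    // Hypothetical helper; it assumes the standard HeaderInfo members
    // client_request / server_request plus the request_range member added above.
    static void
    build_origin_request_for_partial_fill(HttpTransact::State *s, HTTPVersion version)
    {
      HTTPRangeSpec const *ranges = NULL;

      // Forward a Range header upstream only when the client actually sent one.
      if (s->hdr_info.request_range.hasRanges())
        ranges = &s->hdr_info.request_range;

      HttpTransact::build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, version, ranges);
    }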

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransactHeaders.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransactHeaders.cc b/proxy/http/HttpTransactHeaders.cc
index 28cdffc..0c700e3 100644
--- a/proxy/http/HttpTransactHeaders.cc
+++ b/proxy/http/HttpTransactHeaders.cc
@@ -1036,3 +1036,16 @@ HttpTransactHeaders::remove_privacy_headers_from_request(HttpConfigParams *http_
     }
   }
 }
+
+void
+HttpTransactHeaders::insert_request_range_header(HTTPHdr *header, HTTPRangeSpec const *ranges)
+{
+  int n;
+  char buff[1024];
+
+  if (ranges->hasRanges()) {
+    int64_t ffs = cacheProcessor.get_fixed_fragment_size();
+    n = ranges->print_quantized(buff, sizeof(buff), ffs, ffs);
+    header->value_set(MIME_FIELD_RANGE, MIME_LEN_RANGE, buff, n);
+  }
+}
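
insert_request_range_header() above hands the formatting to HTTPRangeSpec::print_quantized(), which is not part of this excerpt; the fixed-fragment-size arguments suggest the upstream request is widened to whole cache fragments so the returned bytes line up with what the cache stores. A rough sketch of that kind of rounding, offered purely as an assumption about the quantization rule:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Round a client range [start, end] (inclusive) out to fragment boundaries
    // and print it as a Range header value. Illustrative only.
    static int
    print_fragment_aligned_range(char *buf, size_t len, uint64_t start, uint64_t end, uint64_t fragment_size)
    {
      uint64_t qstart = (start / fragment_size) * fragment_size;         // round the start down
      uint64_t qend   = ((end / fragment_size) + 1) * fragment_size - 1; // round the end up, inclusive
      return snprintf(buf, len, "bytes=%" PRIu64 "-%" PRIu64, qstart, qend);
    }

With a 1 MiB fragment, a client range of bytes=1500000-1600000 would widen to bytes=1048576-2097151, i.e. exactly the second fragment of the object.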

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransactHeaders.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransactHeaders.h b/proxy/http/HttpTransactHeaders.h
index 505a6fa..c4d1b92 100644
--- a/proxy/http/HttpTransactHeaders.h
+++ b/proxy/http/HttpTransactHeaders.h
@@ -85,6 +85,7 @@ public:
   static void remove_privacy_headers_from_request(HttpConfigParams *http_config_param, OverridableHttpConfigParams *http_txn_conf,
                                                   HTTPHdr *header);
 
+  static void insert_request_range_header(HTTPHdr *header, HTTPRangeSpec const *ranges);
   static int nstrcpy(char *d, const char *as);
 };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTunnel.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc
index 571d512..887bf77 100644
--- a/proxy/http/HttpTunnel.cc
+++ b/proxy/http/HttpTunnel.cc
@@ -830,13 +830,7 @@ HttpTunnel::producer_run(HttpTunnelProducer *p)
     }
   }
 
-  int64_t read_start_pos = 0;
-  if (p->vc_type == HT_CACHE_READ && sm->t_state.range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
-    ink_assert(sm->t_state.num_range_fields == 1); // we current just support only one range entry
-    read_start_pos = sm->t_state.ranges[0]._start;
-    producer_n = (sm->t_state.ranges[0]._end - sm->t_state.ranges[0]._start) + 1;
-    consumer_n = (producer_n + sm->client_response_hdr_bytes);
-  } else if (p->nbytes >= 0) {
+  if (p->nbytes >= 0) {
     consumer_n = p->nbytes;
     producer_n = p->ntodo;
   } else {
@@ -988,11 +982,7 @@ HttpTunnel::producer_run(HttpTunnelProducer *p)
       Debug("http_tunnel", "[%" PRId64 "] [tunnel_run] producer already done", sm->sm_id);
       producer_handler(HTTP_TUNNEL_EVENT_PRECOMPLETE, p);
     } else {
-      if (read_start_pos > 0) {
-        p->read_vio = ((CacheVC *)p->vc)->do_io_pread(this, producer_n, p->read_buffer, read_start_pos);
-      } else {
-        p->read_vio = p->vc->do_io_read(this, producer_n, p->read_buffer);
-      }
+      p->read_vio = p->vc->do_io_read(this, producer_n, p->read_buffer);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HPACK.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/HPACK.cc b/proxy/http2/HPACK.cc
index b074898..1b3a83c 100644
--- a/proxy/http2/HPACK.cc
+++ b/proxy/http2/HPACK.cc
@@ -241,8 +241,10 @@ Http2DynamicTable::add_header_field(const MIMEField *field)
   uint32_t header_size = ADDITIONAL_OCTETS + name_len + value_len;
 
   if (header_size > _settings_dynamic_table_size) {
-    // 5.3. It is not an error to attempt to add an entry that is larger than the maximum size; an
-    // attempt to add an entry larger than the entire table causes the table to be emptied of all existing entries.
+    // 5.3. It is not an error to attempt to add an entry that is larger than
+    // the maximum size; an
+    // attempt to add an entry larger than the entire table causes the table to
+    // be emptied of all existing entries.
     _headers.clear();
     _mhdr->fields_clear();
   } else {
@@ -602,7 +604,8 @@ decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start,
   HpackFieldType ftype = hpack_parse_field_type(*p);
 
   if (ftype == HPACK_FIELD_INDEXED_LITERAL) {
-    // 7.2.1. index extraction based on Literal Header Field with Incremental Indexing
+    // 7.2.1. index extraction based on Literal Header Field with Incremental
+    // Indexing
     len = decode_integer(index, p, buf_end, 6);
     isIncremental = true;
   } else if (ftype == HPACK_FIELD_NEVERINDEX_LITERAL) {
@@ -654,7 +657,6 @@ decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start,
   p += len;
   header.value_set(value_str, value_str_len);
 
-
   // Incremental Indexing adds header to header table as new entry
   if (isIncremental) {
     dynamic_table.add_header_field(header.field_get());
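
The size rule cited in the add_header_field() hunk above is the usual HPACK accounting: an entry costs its name length plus its value length plus a fixed per-entry overhead (ADDITIONAL_OCTETS here; the specification uses 32 octets), an entry larger than the whole table empties the table without being inserted, and otherwise the oldest entries are evicted until the new one fits. A compact standalone sketch of that rule, not the Http2DynamicTable implementation:

    #include <cstdint>
    #include <deque>
    #include <string>

    struct Entry {
      std::string name, value;
      uint32_t size() const { return name.size() + value.size() + 32; } // 32 = spec's per-entry overhead
    };

    struct DynamicTableSketch {
      std::deque<Entry> entries; // front = newest, back = oldest
      uint32_t used;
      uint32_t max_size;

      explicit DynamicTableSketch(uint32_t max) : used(0), max_size(max) {}

      void add(Entry const &e)
      {
        if (e.size() > max_size) { // larger than the entire table: empty it,
          entries.clear();         // and the new entry is not inserted
          used = 0;
          return;
        }
        while (used + e.size() > max_size) { // evict oldest entries until the new one fits
          used -= entries.back().size();
          entries.pop_back();
        }
        entries.push_front(e);
        used += e.size();
      }
    };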

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HPACK.h
----------------------------------------------------------------------
diff --git a/proxy/http2/HPACK.h b/proxy/http2/HPACK.h
index 4e63a37..a385e93 100644
--- a/proxy/http2/HPACK.h
+++ b/proxy/http2/HPACK.h
@@ -47,9 +47,12 @@ const static int HPACK_ERROR_HTTP2_PROTOCOL_ERROR = -2;
 
 enum HpackFieldType {
   HPACK_FIELD_INDEX,              // HPACK 7.1 Indexed Header Field Representation
-  HPACK_FIELD_INDEXED_LITERAL,    // HPACK 7.2.1 Literal Header Field with Incremental Indexing
-  HPACK_FIELD_NOINDEX_LITERAL,    // HPACK 7.2.2 Literal Header Field without Indexing
-  HPACK_FIELD_NEVERINDEX_LITERAL, // HPACK 7.2.3 Literal Header Field never Indexed
+  HPACK_FIELD_INDEXED_LITERAL,    // HPACK 7.2.1 Literal Header Field with
+                                  // Incremental Indexing
+  HPACK_FIELD_NOINDEX_LITERAL,    // HPACK 7.2.2 Literal Header Field without
+                                  // Indexing
+  HPACK_FIELD_NEVERINDEX_LITERAL, // HPACK 7.2.3 Literal Header Field never
+                                  // Indexed
   HPACK_FIELD_TABLESIZE_UPDATE,   // HPACK 7.3 Header Table Size Update
 };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HTTP2.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/HTTP2.cc b/proxy/http2/HTTP2.cc
index 5a6637b..6e3b711 100644
--- a/proxy/http2/HTTP2.cc
+++ b/proxy/http2/HTTP2.cc
@@ -102,7 +102,8 @@ http2_are_frame_flags_valid(uint8_t ftype, uint8_t fflags)
     HTTP2_FLAGS_WINDOW_UPDATE_MASK, HTTP2_FLAGS_CONTINUATION_MASK,
   };
 
-  // The frame flags are valid for this frame if nothing outside the defined bits is set.
+  // The frame flags are valid for this frame if nothing outside the defined
+  // bits is set.
   return (fflags & ~mask[ftype]) == 0;
 }
 
@@ -129,8 +130,7 @@ http2_settings_parameter_is_valid(const Http2SettingsParameter &param)
 {
   // Static maximum values for Settings parameters.
   static const uint32_t settings_max[HTTP2_SETTINGS_MAX] = {
-    0,
-    UINT_MAX,              // HTTP2_SETTINGS_HEADER_TABLE_SIZE
+    0, UINT_MAX,           // HTTP2_SETTINGS_HEADER_TABLE_SIZE
     1,                     // HTTP2_SETTINGS_ENABLE_PUSH
     UINT_MAX,              // HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS
     HTTP2_MAX_WINDOW_SIZE, // HTTP2_SETTINGS_INITIAL_WINDOW_SIZE
@@ -316,7 +316,6 @@ http2_parse_headers_parameter(IOVec iov, Http2HeadersParameter &params)
   return true;
 }
 
-
 // 6.3.  PRIORITY
 //
 // 0                   1                   2                   3
@@ -392,7 +391,6 @@ http2_parse_settings_parameter(IOVec iov, Http2SettingsParameter &param)
   return true;
 }
 
-
 // 6.8.  GOAWAY
 //
 // 0                   1                   2                   3
@@ -420,7 +418,6 @@ http2_parse_goaway(IOVec iov, Http2Goaway &goaway)
   return true;
 }
 
-
 // 6.9.  WINDOW_UPDATE
 //
 // 0                   1                   2                   3
@@ -587,8 +584,10 @@ http2_write_header_fragment(HTTPHdr *in, MIMEFieldIter &field_iter, uint8_t *out
   ink_assert(http_hdr_type_get(in->m_http) != HTTP_TYPE_UNKNOWN);
   ink_assert(in);
 
-  // TODO Get a index value from the tables for the header field, and then choose a representation type.
-  // TODO Each indexing types per field should be passed by a caller, HTTP/2 implementation.
+  // TODO Get an index value from the tables for the header field, and then
+  // choose a representation type.
+  // TODO The indexing type for each field should be passed by the caller,
+  // i.e. the HTTP/2 implementation.
 
   // Get first header field which is required encoding
   MIMEField *field;
@@ -766,7 +765,6 @@ Http2::init()
   REC_EstablishStaticConfigInt32U(max_header_list_size, "proxy.config.http2.max_header_list_size");
 }
 
-
 #if TS_HAS_TESTS
 
 #include "TestBox.h"
@@ -777,10 +775,11 @@ const static int MAX_TEST_FIELD_NUM = 8;
 
 /***********************************************************************************
  *                                                                                 *
- *                   Test cases for regression test                                *
+ *                   Test cases for regression test *
  *                                                                                 *
- * Some test cases are based on examples of specification.                         *
- * http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09#appendix-D  *
+ * Some test cases are based on examples of specification. *
+ * http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09#appendix-D
+ **
  *                                                                                 *
  ***********************************************************************************/
 
@@ -887,7 +886,7 @@ const static struct {
 
 /***********************************************************************************
  *                                                                                 *
- *                                Regression test codes                            *
+ *                                Regression test codes *
  *                                                                                 *
  ***********************************************************************************/
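
The flag check touched in http2_are_frame_flags_valid() boils down to a single bitwise test: a frame is acceptable only if no bit outside its type's defined-flags mask is set (the masks themselves live in HTTP2.h, e.g. HTTP2_FLAGS_SETTINGS_MASK = 0x01). A tiny sketch:

    #include <cstdint>

    inline bool
    frame_flags_valid(uint8_t flags, uint8_t defined_mask)
    {
      return (flags & ~defined_mask) == 0;
    }

    // With the SETTINGS mask 0x01:
    //   frame_flags_valid(0x01, 0x01) -> true  (ACK)
    //   frame_flags_valid(0x02, 0x01) -> false (an undefined flag bit is set)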
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HTTP2.h
----------------------------------------------------------------------
diff --git a/proxy/http2/HTTP2.h b/proxy/http2/HTTP2.h
index bbeffd3..fe976de 100644
--- a/proxy/http2/HTTP2.h
+++ b/proxy/http2/HTTP2.h
@@ -33,7 +33,8 @@ class HTTPHdr;
 
 typedef unsigned Http2StreamId;
 
-// 6.9.2 Initial Flow Control Window Size - the flow control window can be come negative
+// 6.9.2 Initial Flow Control Window Size - the flow control window can become
+// negative
 // so we need to track it with a signed type.
 typedef int32_t Http2WindowSize;
 
@@ -77,7 +78,6 @@ enum Http2ErrorCode {
   HTTP2_ERROR_ENHANCE_YOUR_CALM = 11,
   HTTP2_ERROR_INADEQUATE_SECURITY = 12,
   HTTP2_ERROR_HTTP_1_1_REQUIRED = 13,
-
   HTTP2_ERROR_MAX,
 };
 
@@ -103,7 +103,6 @@ enum Http2FrameType {
   HTTP2_FRAME_TYPE_GOAWAY = 7,
   HTTP2_FRAME_TYPE_WINDOW_UPDATE = 8,
   HTTP2_FRAME_TYPE_CONTINUATION = 9,
-
   HTTP2_FRAME_TYPE_MAX,
 };
 
@@ -111,7 +110,6 @@ enum Http2FrameType {
 enum Http2FrameFlagsData {
   HTTP2_FLAGS_DATA_END_STREAM = 0x01,
   HTTP2_FLAGS_DATA_PADDED = 0x08,
-
   HTTP2_FLAGS_DATA_MASK = 0x2B,
 };
 
@@ -121,7 +119,6 @@ enum Http2FrameFlagsHeaders {
   HTTP2_FLAGS_HEADERS_END_HEADERS = 0x04,
   HTTP2_FLAGS_HEADERS_PADDED = 0x08,
   HTTP2_FLAGS_HEADERS_PRIORITY = 0x20,
-
   HTTP2_FLAGS_HEADERS_MASK = 0x2B,
 };
 
@@ -136,27 +133,18 @@ enum Http2FrameFlagsRstStream {
 };
 
 // 6.4 Settings
-enum Http2FrameFlagsSettings {
-  HTTP2_FLAGS_SETTINGS_ACK = 0x01,
-
-  HTTP2_FLAGS_SETTINGS_MASK = 0x01
-};
+enum Http2FrameFlagsSettings { HTTP2_FLAGS_SETTINGS_ACK = 0x01, HTTP2_FLAGS_SETTINGS_MASK = 0x01 };
 
 // 6.6 Push Promise
 enum Http2FrameFlagsPushPromise {
   HTTP2_FLAGS_PUSH_PROMISE_END_HEADERS = 0x04,
   HTTP2_FLAGS_PUSH_PROMISE_PAD_LOW = 0x08,
   HTTP2_FLAGS_PUSH_PROMISE_PAD_HIGH = 0x10,
-
   HTTP2_FLAGS_PUSH_PROMISE_MASK = 0x1C,
 };
 
 // 6.7 Ping
-enum Http2FrameFlagsPing {
-  HTTP2_FLAGS_PING_ACK = 0x01,
-
-  HTTP2_FLAGS_PING_MASK = 0x01
-};
+enum Http2FrameFlagsPing { HTTP2_FLAGS_PING_ACK = 0x01, HTTP2_FLAGS_PING_MASK = 0x01 };
 
 // 6.8 Goaway
 enum Http2FrameFlagsGoaway {
@@ -173,7 +161,6 @@ enum Http2FrameFlagsContinuation {
   HTTP2_FLAGS_CONTINUATION_END_HEADERS = 0x04,
   HTTP2_FLAGS_CONTINUATION_PAD_LOW = 0x08,
   HTTP2_FLAGS_CONTINUATION_PAD_HIGH = 0x10,
-
   HTTP2_FLAGS_CONTINUATION_MASK = 0x1C,
 };
 
@@ -185,7 +172,6 @@ enum Http2SettingsIdentifier {
   HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 4,
   HTTP2_SETTINGS_MAX_FRAME_SIZE = 5,
   HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 6,
-
   HTTP2_SETTINGS_MAX
 };
 
@@ -222,8 +208,10 @@ struct Http2Goaway {
   Http2StreamId last_streamid;
   uint32_t error_code;
 
-  // NOTE: we don't (de)serialize the variable length debug data at this layer because there's
-  // really nothing we can do with it without some out of band agreement. Trying to deal with it
+  // NOTE: we don't (de)serialize the variable length debug data at this layer
+  // because there's
+  // really nothing we can do with it without some out of band agreement. Trying
+  // to deal with it
   // just complicates memory management.
 };
 
@@ -286,9 +274,10 @@ int64_t http2_write_psuedo_headers(HTTPHdr *, uint8_t *, uint64_t, Http2DynamicT
 
 int64_t http2_write_header_fragment(HTTPHdr *, MIMEFieldIter &, uint8_t *, uint64_t, Http2DynamicTable &, bool &);
 
-
-// Not sure where else to put this, but figure this is as good of a start as anything else.
-// Right now, only the static init() is available, which sets up some basic librecords
+// Not sure where else to put this, but figure this is as good of a start as
+// anything else.
+// Right now, only the static init() is available, which sets up some basic
+// librecords
 // dependencies.
 class Http2
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ClientSession.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ClientSession.cc b/proxy/http2/Http2ClientSession.cc
index 40c4a50..87c9204 100644
--- a/proxy/http2/Http2ClientSession.cc
+++ b/proxy/http2/Http2ClientSession.cc
@@ -41,7 +41,8 @@
 
 ClassAllocator<Http2ClientSession> http2ClientSessionAllocator("http2ClientSessionAllocator");
 
-// memcpy the requested bytes from the IOBufferReader, returning how many were actually copied.
+// memcpy the requested bytes from the IOBufferReader, returning how many were
+// actually copied.
 static inline unsigned
 copy_from_buffer_reader(void *dst, IOBufferReader *reader, unsigned nbytes)
 {
@@ -94,7 +95,8 @@ Http2ClientSession::start()
   // 3.5 HTTP/2 Connection Preface. Upon establishment of a TCP connection and
   // determination that HTTP/2 will be used by both peers, each endpoint MUST
   // send a connection preface as a final confirmation ...
-  // this->write_buffer->write(HTTP2_CONNECTION_PREFACE, HTTP2_CONNECTION_PREFACE_LEN);
+  // this->write_buffer->write(HTTP2_CONNECTION_PREFACE,
+  // HTTP2_CONNECTION_PREFACE_LEN);
 
   this->connection_state.init();
   send_connection_event(&this->connection_state, HTTP2_SESSION_EVENT_INIT, this);
@@ -145,7 +147,8 @@ Http2ClientSession::set_upgrade_context(HTTPHdr *h)
       Http2SettingsParameter param;
       if (!http2_parse_settings_parameter(make_iovec(out_buf + nbytes, HTTP2_SETTINGS_PARAMETER_LEN), param) ||
           !http2_settings_parameter_is_valid(param)) {
-        // TODO ignore incoming invalid parameters and send suitable SETTINGS frame.
+        // TODO ignore incoming invalid parameters and send suitable SETTINGS
+        // frame.
       }
       upgrade_context.client_settings.set((Http2SettingsIdentifier)param.id, param.value);
     }
@@ -181,7 +184,8 @@ Http2ClientSession::do_io_shutdown(ShutdownHowTo_t howto)
   this->client_vc->do_io_shutdown(howto);
 }
 
-// XXX Currently, we don't have a half-closed state, but we will need to implement that. After we send a GOAWAY, there
+// XXX Currently, we don't have a half-closed state, but we will need to
+// implement that. After we send a GOAWAY, there
 // are scenarios where we would like to complete the outstanding streams.
 
 void
@@ -282,8 +286,10 @@ Http2ClientSession::state_read_connection_preface(int event, void *edata)
     }
   }
 
-  // XXX We don't have enough data to check the connection preface. We should reset the accept inactivity
-  // timeout. We should have a maximum timeout to get the session started though.
+  // XXX We don't have enough data to check the connection preface. We should
+  // reset the accept inactivity
+  // timeout. We should have a maximum timeout to get the session started
+  // though.
 
   vio->reenable();
   return 0;
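
copy_from_buffer_reader() above is documented as "memcpy the requested bytes from the IOBufferReader, returning how many were actually copied", i.e. a bounded copy across a chain of buffer blocks. A generic sketch of that contract with illustrative types, not the ATS IOBufferReader API:

    #include <algorithm>
    #include <cstring>

    struct Block { // illustrative stand-in for one block in the reader's chain
      const char *data;
      unsigned avail;
      Block *next;
    };

    static unsigned
    copy_from_blocks(void *dst, Block *b, unsigned nbytes)
    {
      unsigned copied = 0;
      for (; b != nullptr && copied < nbytes; b = b->next) {
        unsigned n = std::min(nbytes - copied, b->avail);
        memcpy(static_cast<char *>(dst) + copied, b->data, n);
        copied += n;
      }
      return copied; // may be less than nbytes if the reader runs dry
    }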

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ClientSession.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ClientSession.h b/proxy/http2/Http2ClientSession.h
index 5e3ab23..3d41821 100644
--- a/proxy/http2/Http2ClientSession.h
+++ b/proxy/http2/Http2ClientSession.h
@@ -87,7 +87,8 @@ public:
     return this->hdr.cooked;
   }
 
-  // Allocate an IOBufferBlock for this frame. This switches us from using the in-line header
+  // Allocate an IOBufferBlock for this frame. This switches us from using the
+  // in-line header
   // buffer, to an external buffer block.
   void
   alloc(int index)
@@ -198,7 +199,6 @@ public:
     return upgrade_context;
   }
 
-
 private:
   Http2ClientSession(Http2ClientSession &);                  // noncopyable
   Http2ClientSession &operator=(const Http2ClientSession &); // noncopyable

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ConnectionState.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ConnectionState.h b/proxy/http2/Http2ConnectionState.h
index 61526f0..348206a 100644
--- a/proxy/http2/Http2ConnectionState.h
+++ b/proxy/http2/Http2ConnectionState.h
@@ -35,7 +35,8 @@ class Http2ConnectionSettings
 public:
   Http2ConnectionSettings()
   {
-    // 6.5.2.  Defined SETTINGS Parameters. These should generally not be modified,
+    // 6.5.2.  Defined SETTINGS Parameters. These should generally not be
+    // modified,
     // only if the protocol changes should these change.
     settings[indexof(HTTP2_SETTINGS_ENABLE_PUSH)] = 0; // Disabled for now
 
@@ -180,10 +181,10 @@ private:
   uint64_t data_length;
 };
 
-
 // Http2ConnectionState
 //
-// Capture the semantics of a HTTP/2 connection. The client session captures the frame layer, and the
+// Capture the semantics of a HTTP/2 connection. The client session captures the
+// frame layer, and the
 // connection state captures the connection-wide state.
 
 class Http2ConnectionState : public Continuation
@@ -213,7 +214,8 @@ public:
     continued_buffer.iov_base = NULL;
     continued_buffer.iov_len = 0;
 
-    // Load the server settings from the records.config / RecordsConfig.cc settings.
+    // Load the server settings from the records.config / RecordsConfig.cc
+    // settings.
     server_settings.settings_from_configs();
   }
 
@@ -282,7 +284,8 @@ private:
   // Counter for currently active streams, which are started by the client
   uint32_t client_streams_count;
 
-  // The buffer used for storing incomplete fragments of a header field which consists of multiple frames.
+  // The buffer used for storing incomplete fragments of a header field which
+  // consists of multiple frames.
   Http2StreamId continued_id;
   IOVec continued_buffer;
 };

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2SessionAccept.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2SessionAccept.cc b/proxy/http2/Http2SessionAccept.cc
index fbb25db..5651e5b 100644
--- a/proxy/http2/Http2SessionAccept.cc
+++ b/proxy/http2/Http2SessionAccept.cc
@@ -38,7 +38,8 @@ Http2SessionAccept::~Http2SessionAccept()
 void
 Http2SessionAccept::accept(NetVConnection *netvc, MIOBuffer *iobuf, IOBufferReader *reader)
 {
-  // XXX we need to refactor the ACL checks from HttpSessionAccept so that we can invoke them here, and also in
+  // XXX we need to refactor the ACL checks from HttpSessionAccept so that we
+  // can invoke them here, and also in
   // the SPDY protocol layer ...
   // Warning("skipping access control checks for HTTP/2 connection");
 
@@ -48,8 +49,9 @@ Http2SessionAccept::accept(NetVConnection *netvc, MIOBuffer *iobuf, IOBufferRead
     const sockaddr *client_ip = netvc->get_remote_addr();
     ip_port_text_buffer ipb;
 
-    Debug("http2_seq", "[HttpSessionAccept2:mainEvent %p] accepted connection from %s transport type = %d", netvc,
-          ats_ip_nptop(client_ip, ipb, sizeof(ipb)), netvc->attributes);
+    Debug("http2_seq", "[HttpSessionAccept2:mainEvent %p] accepted connection "
+                       "from %s transport type = %d",
+          netvc, ats_ip_nptop(client_ip, ipb, sizeof(ipb)), netvc->attributes);
   }
 
   // XXX Allocate a Http2ClientSession
@@ -69,7 +71,8 @@ Http2SessionAccept::mainEvent(int event, void *data)
     return EVENT_CONT;
   }
 
-  // XXX We should hoist the error handling so that all the protocols generate the statistics
+  // XXX We should hoist the error handling so that all the protocols generate
+  // the statistics
   // without code duplication.
   if (((long)data) == -ECONNABORTED) {
     HTTP_SUM_DYN_STAT(http_ua_msecs_counts_errors_pre_accept_hangups_stat, 0);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2SessionAccept.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2SessionAccept.h b/proxy/http2/Http2SessionAccept.h
index 6d6fce0..e4f219b 100644
--- a/proxy/http2/Http2SessionAccept.h
+++ b/proxy/http2/Http2SessionAccept.h
@@ -27,17 +27,21 @@
 #include "libts.h"
 #include "I_Net.h"
 
-// XXX HttpSessionAccept::Options needs to be refactored and separated from HttpSessionAccept so that
+// XXX HttpSessionAccept::Options needs to be refactored and separated from
+// HttpSessionAccept so that
 // it can generically apply to all protocol implementations.
 #include "http/HttpSessionAccept.h"
 
 // HTTP/2 Session Accept.
 //
-// HTTP/2 needs to be explicitly enabled on a server port. The syntax is different for SSL and raw
-// ports. There's currently no support for the HTTP/1.1 upgrade path. The example below configures
+// HTTP/2 needs to be explicitly enabled on a server port. The syntax is
+// different for SSL and raw
+// ports. There's currently no support for the HTTP/1.1 upgrade path. The
+// example below configures
 // HTTP/2 on port 80 and port 443 (with TLS).
 //
-// CONFIG proxy.config.http.server_ports STRING 80:proto=http2 443:ssl:proto=h2-12
+// CONFIG proxy.config.http.server_ports STRING 80:proto=http2
+// 443:ssl:proto=h2-12
 
 struct Http2SessionAccept : public SessionAccept {
   explicit Http2SessionAccept(const HttpSessionAccept::Options &);
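
Spelled out on a single line, the server_ports example from the comment above is:

    CONFIG proxy.config.http.server_ports STRING 80:proto=http2 443:ssl:proto=h2-12

which enables HTTP/2 on cleartext port 80 and, with TLS, on port 443.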

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/Log.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc
index 332131b..84149c4 100644
--- a/proxy/logging/Log.cc
+++ b/proxy/logging/Log.cc
@@ -130,9 +130,12 @@ Log::change_configuration()
   ink_mutex_release(prev->log_object_manager._APImutex);
   Debug("log-api-mutex", "Log::change_configuration released api mutex");
 
-  // Register the new config in the config processor; the old one will now be scheduled for a
-  // future deletion. We don't need to do anything magical with refcounts, since the
-  // configProcessor will keep a reference count, and drop it when the deletion is scheduled.
+  // Register the new config in the config processor; the old one will now be
+  // scheduled for a
+  // future deletion. We don't need to do anything magical with refcounts, since
+  // the
+  // configProcessor will keep a reference count, and drop it when the deletion
+  // is scheduled.
   configProcessor.set(log_configid, new_config);
 
   // If we replaced the logging configuration, flush any log
@@ -516,11 +519,12 @@ Log::init_fields()
     SQUID_LOG_ERR_FUTURE_1, "ERR_FUTURE_1", SQUID_LOG_ERR_UNKNOWN, "ERR_UNKNOWN");
 
   Ptr<LogFieldAliasTable> cache_hit_miss_map = make_ptr(new LogFieldAliasTable);
-  cache_hit_miss_map->init(23, SQUID_HIT_RESERVED, "HIT", SQUID_HIT_LEVEL_1, "HIT_RAM", // Also SQUID_HIT_RAM
-                           SQUID_HIT_LEVEL_2, "HIT_SSD",                                // Also SQUID_HIT_SSD
-                           SQUID_HIT_LEVEL_3, "HIT_DISK",                               // Also SQUID_HIT_DISK
-                           SQUID_HIT_LEVEL_4, "HIT_CLUSTER",                            // Also SQUID_HIT_CLUSTER
-                           SQUID_HIT_LEVEL_5, "HIT_NET",                                // Also SQUID_HIT_NET
+  cache_hit_miss_map->init(23, SQUID_HIT_RESERVED, "HIT", SQUID_HIT_LEVEL_1,
+                           "HIT_RAM",                        // Also SQUID_HIT_RAM
+                           SQUID_HIT_LEVEL_2, "HIT_SSD",     // Also SQUID_HIT_SSD
+                           SQUID_HIT_LEVEL_3, "HIT_DISK",    // Also SQUID_HIT_DISK
+                           SQUID_HIT_LEVEL_4, "HIT_CLUSTER", // Also SQUID_HIT_CLUSTER
+                           SQUID_HIT_LEVEL_5, "HIT_NET",     // Also SQUID_HIT_NET
                            SQUID_HIT_LEVEL_6, "HIT_LEVEL_6", SQUID_HIT_LEVEL_7, "HIT_LEVEL_7", SQUID_HIT_LEVEL_8, "HIT_LEVEL_8",
                            SQUID_HIT_LEVEl_9, "HIT_LEVEL_9", SQUID_MISS_NONE, "MISS", SQUID_MISS_ICP_AUTH, "MISS_ICP_AUTH",
                            SQUID_MISS_HTTP_NON_CACHE, "MISS_HTTP_NON_CACHE", SQUID_MISS_ICP_STOPLIST, "MISS_ICP_STOPLIST",
@@ -1098,8 +1102,9 @@ Log::flush_thread_main(void * /* args ATS_UNUSED */)
       //
       while (total_bytes - bytes_written) {
         if (Log::config->logging_space_exhausted) {
-          Debug("log", "logging space exhausted, failed to write file:%s, have dropped (%d) bytes.", logfile->get_name(),
-                (total_bytes - bytes_written));
+          Debug("log", "logging space exhausted, failed to write file:%s, have "
+                       "dropped (%d) bytes.",
+                logfile->get_name(), (total_bytes - bytes_written));
 
           RecIncrRawStat(log_rsb, mutex->thread_holding, log_stat_bytes_lost_before_written_to_disk_stat,
                          total_bytes - bytes_written);
@@ -1232,7 +1237,9 @@ Log::collate_thread_main(void * /* args ATS_UNUSED */)
       }
 
       if (header->version != LOG_SEGMENT_VERSION) {
-        Note("Invalid LogBuffer received; invalid version - buffer = %u, current = %u", header->version, LOG_SEGMENT_VERSION);
+        Note("Invalid LogBuffer received; invalid version - buffer = %u, "
+             "current = %u",
+             header->version, LOG_SEGMENT_VERSION);
         delete[] header;
         continue;
       }

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/Log.h
----------------------------------------------------------------------
diff --git a/proxy/logging/Log.h b/proxy/logging/Log.h
index fb194c9..8a26b8d 100644
--- a/proxy/logging/Log.h
+++ b/proxy/logging/Log.h
@@ -63,7 +63,8 @@
   o Initial State
 
       - A LogBufferPool is allocated, with storage equal to
-        sizeof (LogBufferPoolHeader) + buffer_segment_count * buffer_segment_size
+        sizeof (LogBufferPoolHeader) + buffer_segment_count *
+  buffer_segment_size
 
       - The buffer pool space is divided into buffer_segment_count
         segments, each with a fixed size of buffer_segment_size.
@@ -467,7 +468,6 @@ private:
   Log &operator=(const Log &rhs);
 };
 
-
 static inline bool
 LogRollingEnabledIsValid(int enabled)
 {


[4/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Crash.cc
----------------------------------------------------------------------
diff --git a/proxy/Crash.cc b/proxy/Crash.cc
index 0febbea..b389cbf 100644
--- a/proxy/Crash.cc
+++ b/proxy/Crash.cc
@@ -27,7 +27,8 @@
 #include "signals.h"
 #include "ink_cap.h"
 
-// ucontext.h is deprecated on Darwin, and we really only need it on Linux, so only
+// ucontext.h is deprecated on Darwin, and we really only need it on Linux, so
+// only
 // include it if we are planning to use it.
 #if defined(__linux__)
 #include <ucontext.h>
@@ -93,7 +94,8 @@ crash_logger_init()
     return;
   }
 
-  // By this point, we have an absolute path, so we'd better be able to find the basename.
+  // By this point, we have an absolute path, so we'd better be able to find the
+  // basename.
   basename = strrchr(logger, '/') + 1;
 
   socketpair(AF_UNIX, SOCK_STREAM, 0, pipe);
@@ -150,8 +152,10 @@ crash_logger_invoke(int signo, siginfo_t *info, void *ctx)
     kill(crash_logger_pid, SIGCONT);
 
 #if defined(__linux__)
-    // Write the crashing thread information to the crash logger. While the siginfo_t is blesses by POSIX, the
-    // ucontext_t can contain pointers, so it's highly platform dependent. On Linux with glibc, however, it is
+    // Write the crashing thread information to the crash logger. While the
+    // siginfo_t is blessed by POSIX, the
+    // ucontext_t can contain pointers, so it's highly platform dependent. On
+    // Linux with glibc, however, it is
     // a single memory block that we can just puke out.
     ATS_UNUSED_RETURN(write(crash_logger_fd, info, sizeof(siginfo_t)));
     ATS_UNUSED_RETURN(write(crash_logger_fd, (ucontext_t *)ctx, sizeof(ucontext_t)));

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/DynamicStats.h
----------------------------------------------------------------------
diff --git a/proxy/DynamicStats.h b/proxy/DynamicStats.h
index 50ba1a4..41c3883 100644
--- a/proxy/DynamicStats.h
+++ b/proxy/DynamicStats.h
@@ -34,16 +34,20 @@ _D(cluster_connections_open_stat)
 _D(cluster_connections_openned_stat)
 _D(cluster_con_total_time_stat)
 _D(cluster_ctrl_msgs_sent_stat)
-_D(cluster_slow_ctrl_msgs_sent_stat) // fast ctrl messages do not require a mallo
+_D(cluster_slow_ctrl_msgs_sent_stat) // fast ctrl messages do not require a
+                                     // malloc
 _D(cluster_ctrl_msgs_recvd_stat)
 _D(cluster_slow_ctrl_msgs_recvd_stat)
 _D(cluster_ctrl_msgs_send_time_stat)
 _D(cluster_ctrl_msgs_recv_time_stat)
 _D(cluster_read_bytes_stat)
 _D(cluster_write_bytes_stat)
-_D(cluster_op_delayed_for_lock_stat) // a message to a machine was blocked by a locked connection
-_D(cluster_connections_locked_stat)  // a connection could not use its slot (locked)
-_D(cluster_connections_bumped_stat)  // a connection could not get a slot (scheduled too late)
+_D(cluster_op_delayed_for_lock_stat) // a message to a machine was blocked by a
+                                     // locked connection
+_D(cluster_connections_locked_stat)  // a connection could not use its slot
+                                     // (locked)
+_D(cluster_connections_bumped_stat)  // a connection could not get a slot
+                                     // (scheduled too late)
 _D(cluster_nodes_stat)
 _D(cluster_net_backup_stat)
 _D(cluster_machines_allocated_stat)
@@ -92,7 +96,6 @@ _D(cluster_vc_cache_scan_lock_misses_stat)
 _D(cluster_vc_cache_purges_stat)
 _D(cluster_write_lock_misses_stat)
 
-
 //
 // Dynamic Load Shedding Stats
 //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/EventName.cc
----------------------------------------------------------------------
diff --git a/proxy/EventName.cc b/proxy/EventName.cc
index 5820236..8e81294 100644
--- a/proxy/EventName.cc
+++ b/proxy/EventName.cc
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #include "ink_config.h"
 #include <stdio.h>
 #include <string.h>
@@ -76,7 +75,6 @@ event_int_to_string(int event, int blen, char *buffer)
   case NET_EVENT_ACCEPT_FAILED:
     return "NET_EVENT_ACCEPT_FAILED";
 
-
 #ifdef CLUSTER_CACHE
   case CLUSTER_EVENT_CHANGE:
     return "CLUSTER_EVENT_CHANGE";
@@ -97,7 +95,6 @@ event_int_to_string(int event, int blen, char *buffer)
   case DNS_EVENT_EVENTS_START:
     return "DNS_EVENT_EVENTS_START";
 
-
   case MULTI_CACHE_EVENT_SYNC:
     return "MULTI_CACHE_EVENT_SYNC";
 
@@ -134,7 +131,6 @@ event_int_to_string(int event, int blen, char *buffer)
   case CACHE_EVENT_RESPONSE_MSG:
     return "CACHE_EVENT_RESPONSE_MSG";
 
-
   case MGMT_EVENT_SHUTDOWN:
     return "MGMT_EVENT_SHUTDOWN";
   case MGMT_EVENT_RESTART:

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/FetchSM.cc
----------------------------------------------------------------------
diff --git a/proxy/FetchSM.cc b/proxy/FetchSM.cc
index 3fd726c..a015bdf 100644
--- a/proxy/FetchSM.cc
+++ b/proxy/FetchSM.cc
@@ -67,9 +67,12 @@ FetchSM::httpConnect()
   http_vc = reinterpret_cast<PluginVC *>(TSHttpConnectWithPluginId(&_addr.sa, tag, id));
 
   /*
-   * TS-2906: We need a way to unset internal request when using FetchSM, the use case for this
-   * is SPDY when it creates outgoing requests it uses FetchSM and the outgoing requests
-   * are spawned via SPDY SYN packets which are definitely not internal requests.
+   * TS-2906: We need a way to unset internal request when using FetchSM, the
+   * use case for this
+   * is SPDY when it creates outgoing requests it uses FetchSM and the outgoing
+   * requests
+   * are spawned via SPDY SYN packets which are definitely not internal
+   * requests.
    */
   if (!is_internal_request) {
     PluginVC *other_side = reinterpret_cast<PluginVC *>(http_vc)->get_other_side();
@@ -363,7 +366,8 @@ FetchSM::get_info_from_buffer(IOBufferReader *the_reader)
   info = (char *)ats_malloc(sizeof(char) * (read_avail + 1));
   client_response = info;
 
-  // To maintain backwards compatability we don't allow chunking when it's not streaming.
+  // To maintain backwards compatibility we don't allow chunking when it's not
+  // streaming.
   if (!(fetch_flags & TS_FETCH_FLAGS_STREAM) || !check_chunked()) {
     /* Read the data out of the reader */
     while (read_avail > 0) {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ICP.cc
----------------------------------------------------------------------
diff --git a/proxy/ICP.cc b/proxy/ICP.cc
index 89232a8..b750ebb 100644
--- a/proxy/ICP.cc
+++ b/proxy/ICP.cc
@@ -165,7 +165,6 @@ static ClassAllocator<ICPPeerReadCont> ICPPeerReadContAllocator("ICPPeerReadCont
 
 static Action *default_action = NULL;
 
-
 ICPHandlerCont::ICPHandlerCont(ICPProcessor *icpP) : PeriodicCont(icpP)
 {
 }
@@ -1745,7 +1744,6 @@ ICPProcessor::start()
   _mcastCB_handler = new ICPHandlerCont(this);
   SET_CONTINUATION_HANDLER(_mcastCB_handler, (ICPHandlerContHandler)&ICPHandlerCont::TossEvent);
 
-
   //
   // Build ICP peer list and setup listen sockets
   //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ICPConfig.cc
----------------------------------------------------------------------
diff --git a/proxy/ICPConfig.cc b/proxy/ICPConfig.cc
index 6c84198..ae6fd16 100644
--- a/proxy/ICPConfig.cc
+++ b/proxy/ICPConfig.cc
@@ -622,8 +622,10 @@ ICPConfiguration::icp_config_change_callback(void *data, void *value, int startu
       ++n_colons;
     }
     if (n_colons != colons_per_entry) {
-      RecSignalWarning(REC_SIGNAL_CONFIG_ERROR, "read icp.config, invalid syntax, line %d: expected %d fields, found %d", ln,
-                       colons_per_entry, n_colons);
+      RecSignalWarning(REC_SIGNAL_CONFIG_ERROR, "read icp.config, invalid "
+                                                "syntax, line %d: expected %d "
+                                                "fields, found %d",
+                       ln, colons_per_entry, n_colons);
       error = 1;
       break;
     }
@@ -850,7 +852,6 @@ ParentSiblingPeer::GetICPPort()
   return _pconfig->GetICPPort();
 }
 
-
 sockaddr *
 ParentSiblingPeer::GetIP()
 {
@@ -1388,7 +1389,6 @@ dumpICPstats()
   }
 }
 
-
 void
 ICPProcessor::DumpICPConfig()
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ICPProcessor.cc
----------------------------------------------------------------------
diff --git a/proxy/ICPProcessor.cc b/proxy/ICPProcessor.cc
index 7978bf9..2cd53a1 100644
--- a/proxy/ICPProcessor.cc
+++ b/proxy/ICPProcessor.cc
@@ -49,7 +49,6 @@ ICPProcessorExt::start()
   _ICPpr->start();
 }
 
-
 Action *
 ICPProcessorExt::ICPQuery(Continuation *c, URL *url)
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ICPProcessor.h
----------------------------------------------------------------------
diff --git a/proxy/ICPProcessor.h b/proxy/ICPProcessor.h
index 08f1945..1039dde 100644
--- a/proxy/ICPProcessor.h
+++ b/proxy/ICPProcessor.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 /****************************************************************************
 
   ICPProcessor.h

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ICPlog.h
----------------------------------------------------------------------
diff --git a/proxy/ICPlog.h b/proxy/ICPlog.h
index bf11d8f..cb74449 100644
--- a/proxy/ICPlog.h
+++ b/proxy/ICPlog.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 /****************************************************************************
 
   ICPlog.h

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/IPAllow.cc
----------------------------------------------------------------------
diff --git a/proxy/IPAllow.cc b/proxy/IPAllow.cc
index 8a88866..f5fbe9e 100644
--- a/proxy/IPAllow.cc
+++ b/proxy/IPAllow.cc
@@ -94,7 +94,6 @@ IpAllow::release(IpAllow *lookup)
 //   End API functions
 //
 
-
 IpAllow::IpAllow(const char *config_var, const char *name, const char *action_val) : module_name(name), action(action_val)
 {
   ats_scoped_str config_path(RecConfigReadConfigPath(config_var));
@@ -207,7 +206,8 @@ IpAllow::BuildTable()
           SignalError(errBuf, alarmAlready);
         } else {
           // INKqa05845
-          // Search for "action=ip_allow method=PURGE method=GET ..." or "action=ip_deny method=PURGE method=GET ...".
+          // Search for "action=ip_allow method=PURGE method=GET ..." or
+          // "action=ip_deny method=PURGE method=GET ...".
           char *label, *val;
           uint32_t acl_method_mask = 0;
           AclRecord::MethodSet nonstandard_methods;
@@ -278,7 +278,8 @@ IpAllow::BuildTable()
             _map.fill(&addr1, &addr2, reinterpret_cast<void *>(_acls.length() - 1));
           } else {
             snprintf(errBuf, sizeof(errBuf), "%s discarding %s entry at line %d : %s", module_name, config_file_path, line_num,
-                     "Invalid action/method specified"); // changed by YTS Team, yamsat bug id -59022
+                     "Invalid action/method specified"); // changed by YTS Team,
+                                                         // yamsat bug id -59022
             SignalError(errBuf, alarmAlready);
           }
         }

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/InkAPITest.cc
----------------------------------------------------------------------
diff --git a/proxy/InkAPITest.cc b/proxy/InkAPITest.cc
index 25b99da..06ccab5 100644
--- a/proxy/InkAPITest.cc
+++ b/proxy/InkAPITest.cc
@@ -72,7 +72,6 @@ SDK_RPRINT(RegressionTest *t, const char *api_name, const char *testcase_name, i
   return (l);
 }
 
-
 /*
   REGRESSION_TEST(SDK_<test_name>)(RegressionTest *t, int atype, int *pstatus)
 
@@ -133,7 +132,6 @@ REGRESSION_TEST(SDK_API_TSTrafficServerVersionGet)(RegressionTest *test, int /*
   return;
 }
 
-
 ////////////////////////////////////////////////
 //       SDK_API_TSPluginDirGet
 //
@@ -178,7 +176,6 @@ REGRESSION_TEST(SDK_API_TSPluginDirGet)(RegressionTest *test, int /* atype ATS_U
   return;
 }
 
-
 /* TSConfig */
 ////////////////////////////////////////////////
 //       SDK_API_TSConfig
@@ -288,8 +285,10 @@ server_handler(TSCont contp, TSEvent event, void *data)
     params->vc = (TSVConn)data;
     TSVConnRead((TSVConn)data, contp, params->buffer, 100);
   } else if (event == TS_EVENT_VCONN_EOS) {
-    // The server end of the test passes if it receives an EOF event. This means that it must have
-    // connected to the endpoint. Since this always happens *after* the accept, we know that it is
+    // The server end of the test passes if it receives an EOF event. This means
+    // that it must have
+    // connected to the endpoint. Since this always happens *after* the accept,
+    // we know that it is
     // safe to delete the params.
     TSContDestroy(contp);
 
@@ -354,9 +353,11 @@ client_handler(TSCont contp, TSEvent event, void *data)
 
     SDK_RPRINT(params->test, params->api, "TSNetConnect", TC_PASS, "ok");
 
-    // XXX We really ought to do a write/read exchange with the server. The sleep above works around this.
+    // XXX We really ought to do a write/read exchange with the server. The
+    // sleep above works around this.
 
-    // Looks good from the client end. Next we disconnect so that the server end can set the final test status.
+    // Looks good from the client end. Next we disconnect so that the server end
+    // can set the final test status.
     TSVConnClose((TSVConn)data);
   }
 
@@ -463,10 +464,8 @@ REGRESSION_TEST(SDK_API_TSPortDescriptor)(RegressionTest *test, int /* atype ATS
 //  - remove it from the cache
//  - try to read it (should fail)
 
-
 #define OBJECT_SIZE 100000 // size of the object we'll write/read/remove in cache
 
-
 RegressionTest *SDK_Cache_test;
 int *SDK_Cache_pstatus;
 static char content[OBJECT_SIZE];
@@ -635,7 +634,6 @@ cache_handler(TSCont contp, TSEvent event, void *data)
     }
     Debug(UTDBG_TAG "_cache_write", "finishing up [d]");
 
-
     if (TSVIOBufferGet(cache_vconn->write_vio) != cache_vconn->bufp) {
       SDK_RPRINT(SDK_Cache_test, "TSVIOBufferGet", "TestCase1", TC_FAIL, "write_vio corrupted");
       *SDK_Cache_pstatus = REGRESSION_TEST_FAILED;
@@ -686,14 +684,12 @@ cache_handler(TSCont contp, TSEvent event, void *data)
 
     Debug(UTDBG_TAG "_cache_write", "finishing up [h]");
 
-
     // start to read data out of cache
     read_counter++;
     TSCacheRead(contp, cache_vconn->key);
     Debug(UTDBG_TAG "_cache_read", "starting read [i]");
     return 1;
 
-
   case TS_EVENT_VCONN_WRITE_READY:
     Debug(UTDBG_TAG "_cache_event", "TS_EVENT_VCONN_WRITE_READY %d %p", event, data);
     if ((TSVIO)data != cache_vconn->write_vio) {
@@ -1199,7 +1195,6 @@ REGRESSION_TEST(SDK_API_TSThread)(RegressionTest *test, int /* atype ATS_UNUSED
   }
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSThread
 //
@@ -1346,7 +1341,6 @@ cont_handler(TSCont /* contp ATS_UNUSED */, TSEvent /* event ATS_UNUSED */, void
   return 0;
 }
 
-
 REGRESSION_TEST(SDK_API_TSContCreate)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -1371,7 +1365,6 @@ REGRESSION_TEST(SDK_API_TSContCreate)(RegressionTest *test, int /* atype ATS_UNU
   TSContDestroy(contp);
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSCont
 //
@@ -1412,7 +1405,6 @@ cont_data_handler(TSCont contp, TSEvent /* event ATS_UNUSED */, void * /* edata
   return 0;
 }
 
-
 REGRESSION_TEST(SDK_API_TSContDataGet)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -1432,7 +1424,6 @@ REGRESSION_TEST(SDK_API_TSContDataGet)(RegressionTest *test, int /* atype ATS_UN
   TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSCont
 //
@@ -1601,7 +1592,6 @@ REGRESSION_TEST(SDK_API_TSIOBufferCreate)(RegressionTest *test, int /* atype ATS
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSIOBuffer
 //
@@ -1639,7 +1629,6 @@ REGRESSION_TEST(SDK_API_TSIOBufferProduce)(RegressionTest *test, int /* atype AT
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSIOBuffer
 //
@@ -1733,7 +1722,6 @@ REGRESSION_TEST(SDK_API_TSIOBufferStart)(RegressionTest *test, int /* atype ATS_
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSIOBuffer
 //
@@ -1747,7 +1735,8 @@ REGRESSION_TEST(SDK_API_TSIOBufferCopy)(RegressionTest *test, int /* atype ATS_U
   bool test_passed = false;
   *pstatus = REGRESSION_TEST_INPROGRESS;
 
-  char input_buf[] = "This is the test for TSIOBufferCopy, TSIOBufferWrite, TSIOBufferReaderCopy";
+  char input_buf[] = "This is the test for TSIOBufferCopy, TSIOBufferWrite, "
+                     "TSIOBufferReaderCopy";
   char output_buf[1024];
   TSIOBuffer bufp = TSIOBufferSizedCreate(TS_IOBUFFER_SIZE_INDEX_4K);
   TSIOBuffer bufp2 = TSIOBufferSizedCreate(TS_IOBUFFER_SIZE_INDEX_4K);
@@ -1860,7 +1849,6 @@ REGRESSION_TEST(SDK_API_TSIOBufferBlockNext)(RegressionTest *test, int /* atype
   return;
 }
 
-
 REGRESSION_TEST(SDK_API_TSContSchedule)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -1916,7 +1904,6 @@ typedef struct {
   unsigned int magic;
 } SocketTest;
 
-
 // This func is called by us from mytest_handler to test TSHttpTxnClientIPGet
 static int
 checkHttpTxnClientIPGet(SocketTest *test, void *data)
@@ -1924,7 +1911,8 @@ checkHttpTxnClientIPGet(SocketTest *test, void *data)
   sockaddr const *ptr;
   in_addr_t ip;
   TSHttpTxn txnp = (TSHttpTxn)data;
-  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the same machine */
+  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the
+                                                   same machine */
 
   ptr = TSHttpTxnClientAddrGet(txnp);
   if (ptr == 0 || INADDR_ANY == (ip = ats_ip4_addr_cast(ptr))) {
@@ -1945,12 +1933,14 @@ checkHttpTxnClientIPGet(SocketTest *test, void *data)
   return TS_EVENT_CONTINUE;
 }
 
-// This func is called by us from mytest_handler to check for TSHttpTxnNextHopIPGet
+// This func is called by us from mytest_handler to check for
+// TSHttpTxnNextHopIPGet
 static int
 checkHttpTxnNextHopIPGet(SocketTest *test, void *data)
 {
   TSHttpTxn txnp = (TSHttpTxn)data;
-  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the same machine */
+  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the
+                                                   same machine */
   sockaddr const *ptr;
   in_addr_t nexthopip;
 
@@ -1974,7 +1964,6 @@ checkHttpTxnNextHopIPGet(SocketTest *test, void *data)
   return TS_EVENT_CONTINUE;
 }
 
-
 // This func is called by us from mytest_handler to test TSHttpTxnServerIPGet
 static int
 checkHttpTxnServerIPGet(SocketTest *test, void *data)
@@ -1982,7 +1971,8 @@ checkHttpTxnServerIPGet(SocketTest *test, void *data)
   sockaddr const *ptr;
   in_addr_t ip;
   TSHttpTxn txnp = (TSHttpTxn)data;
-  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the same machine */
+  in_addr_t actual_ip = htonl(INADDR_LOOPBACK); /* 127.0.0.1 is expected because the client is on the
+                                                   same machine */
 
   ptr = TSHttpTxnServerAddrGet(txnp);
   if (0 == ptr || 0 == (ip = ats_ip4_addr_cast(ptr))) {
@@ -2000,11 +1990,11 @@ checkHttpTxnServerIPGet(SocketTest *test, void *data)
     SDK_RPRINT(test->regtest, "TSHttpTxnServerIPGet", "TestCase1", TC_FAIL, "Value's Mismatch");
   }
 
-
   return TS_EVENT_CONTINUE;
 }
 
-// This func is called by us from mytest_handler to test TSHttpTxnIncomingAddrGet
+// This func is called by us from mytest_handler to test
+// TSHttpTxnIncomingAddrGet
 static int
 checkHttpTxnIncomingAddrGet(SocketTest *test, void *data)
 {
@@ -2174,7 +2164,6 @@ checkHttpTxnServerRespGet(SocketTest *test, void *data)
   return TS_EVENT_CONTINUE;
 }
 
-
 // This func is called both by us when scheduling EVENT_IMMEDIATE
 // And by HTTP SM for registered hooks
 static int
@@ -2329,7 +2318,6 @@ mytest_handler(TSCont contp, TSEvent event, void *data)
   return TS_EVENT_IMMEDIATE;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpHookAdd)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -2368,7 +2356,10 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpHookAdd)(RegressionTest *test, int /* atyp
 
   /* Create a client transaction */
   socktest->browser = synclient_txn_create();
-  char *request = generate_request(HTTP_HOOK_TEST_REQUEST_ID); // this request has a no-cache that prevents caching
+  char *request = generate_request(HTTP_HOOK_TEST_REQUEST_ID); // this request
+                                                               // has a no-cache
+                                                               // that prevents
+                                                               // caching
   synclient_txn_send_request(socktest->browser, request);
   TSfree(request);
 
@@ -2380,7 +2371,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpHookAdd)(RegressionTest *test, int /* atyp
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSUrl
 //
@@ -2572,7 +2562,6 @@ REGRESSION_TEST(SDK_API_TSUrl)(RegressionTest *test, int /* atype ATS_UNUSED */,
            ((query == NULL) ? "" : "?"), ((query == NULL) ? "" : query), ((fragment == NULL) ? "" : "#"),
            ((fragment == NULL) ? "" : fragment));
 
-
   // Set Functions
 
   bufp1 = TSMBufferCreate();
@@ -2735,7 +2724,6 @@ REGRESSION_TEST(SDK_API_TSUrl)(RegressionTest *test, int /* atype ATS_UNUSED */,
       SDK_RPRINT(test, "TSUrlCopy", "TestCase1", TC_FAIL, "Values don't match");
     }
 
-
     // String Test Case 2
     url_string_from_2 = TSUrlStringGet(bufp2, url_loc2, &tmp_len);
     if (strcmp(url_string_from_2, url_expected_string) == 0) {
@@ -2829,22 +2817,38 @@ print_results:
       (test_passed_string1 == false) || (test_passed_string2 == false) || (test_passed_print == false) ||
       (test_passed_length1 == false) || (test_passed_length2 == false) || (test_passed_type == false)) {
     /*** Debugging the test itself....
-    (test_passed_create == false)?printf("test_passed_create is false\n"):printf("");
-    (test_passed_destroy == false)?printf("test_passed_destroy is false\n"):printf("");
-    (test_passed_user == false)?printf("test_passed_user is false\n"):printf("");
-    (test_passed_password == false)?printf("test_passed_password is false\n"):printf("");
-    (test_passed_host == false)?printf("test_passed_host is false\n"):printf("");
-    (test_passed_port == false)?printf("test_passed_port is false\n"):printf("");
-    (test_passed_path == false)?printf("test_passed_path is false\n"):printf("");
-    (test_passed_params == false)?printf("test_passed_params is false\n"):printf("");
-    (test_passed_query == false)?printf("test_passed_query is false\n"):printf("");
-    (test_passed_fragment == false)?printf("test_passed_fragment is false\n"):printf("");
-    (test_passed_copy == false)?printf("test_passed_copy is false\n"):printf("");
-    (test_passed_string1 == false)?printf("test_passed_string1 is false\n"):printf("");
-    (test_passed_string2 == false)?printf("test_passed_string2 is false\n"):printf("");
-    (test_passed_length1 == false)?printf("test_passed_length1 is false\n"):printf("");
-    (test_passed_length2 == false)?printf("test_passed_length2 is false\n"):printf("");
-    (test_passed_type == false)?printf("test_passed_type is false\n"):printf("");
+    (test_passed_create == false)?printf("test_passed_create is false\n"):
+    printf("");
+    (test_passed_destroy == false)?printf("test_passed_destroy is false\n"):
+    printf("");
+    (test_passed_user == false)?printf("test_passed_user is false\n"):
+    printf("");
+    (test_passed_password == false)?printf("test_passed_password is false\n"):
+    printf("");
+    (test_passed_host == false)?printf("test_passed_host is false\n"):
+    printf("");
+    (test_passed_port == false)?printf("test_passed_port is false\n"):
+    printf("");
+    (test_passed_path == false)?printf("test_passed_path is false\n"):
+    printf("");
+    (test_passed_params == false)?printf("test_passed_params is false\n"):
+    printf("");
+    (test_passed_query == false)?printf("test_passed_query is false\n"):
+    printf("");
+    (test_passed_fragment == false)?printf("test_passed_fragment is false\n"):
+    printf("");
+    (test_passed_copy == false)?printf("test_passed_copy is false\n"):
+    printf("");
+    (test_passed_string1 == false)?printf("test_passed_string1 is false\n"):
+    printf("");
+    (test_passed_string2 == false)?printf("test_passed_string2 is false\n"):
+    printf("");
+    (test_passed_length1 == false)?printf("test_passed_length1 is false\n"):
+    printf("");
+    (test_passed_length2 == false)?printf("test_passed_length2 is false\n"):
+    printf("");
+    (test_passed_type == false)?printf("test_passed_type is false\n"):
+    printf("");
     .....***********/
     *pstatus = REGRESSION_TEST_FAILED;
   } else {
@@ -2874,7 +2878,8 @@ print_results:
 //////////////////////////////////////////////
 
 /**
- * If you change value of any constant in this function then reflect that change in variable expected_iobuf.
+ * If you change value of any constant in this function then reflect that change
+ * in variable expected_iobuf.
  */
 REGRESSION_TEST(SDK_API_TSHttpHdr)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
@@ -2939,7 +2944,6 @@ REGRESSION_TEST(SDK_API_TSHttpHdr)(RegressionTest *test, int /* atype ATS_UNUSED
   bool try_print_function = true;
   bool test_buffer_created = true;
 
-
   *pstatus = REGRESSION_TEST_INPROGRESS;
 
   bufp1 = TSMBufferCreate();
@@ -2958,7 +2962,6 @@ REGRESSION_TEST(SDK_API_TSHttpHdr)(RegressionTest *test, int /* atype ATS_UNUSED
     SDK_RPRINT(test, "TSHttpHdrCreate", "All Test Cases", TC_FAIL, "Cannot run test as unable to allocate MBuffers");
   }
 
-
   // Type
   if (test_passed_Http_Hdr_Create == true) {
     if ((TSHttpHdrTypeSet(bufp1, hdr_loc1, TS_HTTP_TYPE_REQUEST) == TS_ERROR) ||
@@ -3365,7 +3368,6 @@ REGRESSION_TEST(SDK_API_TSHttpHdr)(RegressionTest *test, int /* atype ATS_UNUSED
     SDK_RPRINT(test, "TSHttpHdrClone", "All Test Cases", TC_PASS, "Cannot run test as TSHttpHdrCreate has failed");
   }
 
-
   // LengthGet
   if (test_passed_Http_Hdr_Create == true) {
     actual_length = TSHttpHdrLengthGet(bufp1, hdr_loc1);
@@ -3485,7 +3487,6 @@ REGRESSION_TEST(SDK_API_TSHttpHdr)(RegressionTest *test, int /* atype ATS_UNUSED
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSMimeHdrField
 //
@@ -3554,7 +3555,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
   TSMLoc field_loc14 = (TSMLoc)NULL;
   TSMLoc field_loc15 = (TSMLoc)NULL;
 
-
   const char *field1Name = "field1";
   const char *field2Name = "field2";
   const char *field3Name = "field3";
@@ -3722,7 +3722,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
     SDK_RPRINT(test, "TSMimeHdrFieldCreate", "All Test Case", TC_FAIL, "Cannot run test as Test for TSMimeHdrCreate Failed");
   }
 
-
   // TSMimeHdrFieldNameGet&Set
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((TSMimeHdrFieldNameSet(bufp1, mime_loc1, field_loc11, field1Name, -1) == TS_ERROR) ||
@@ -3753,7 +3752,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
                "Cannot run test as Test for TSMBufferFieldCreate Failed");
   }
 
-
   // TSMimeHdrFieldAppend, TSMimeHdrFieldGet, TSMimeHdrFieldNext
   if (test_passed_Mime_Hdr_Field_Name == true) {
     if ((TSMimeHdrFieldAppend(bufp1, mime_loc1, field_loc11) != TS_SUCCESS) ||
@@ -3765,8 +3763,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
     } else {
       if (TS_NULL_MLOC == (test_field_loc11 = TSMimeHdrFieldGet(bufp1, mime_loc1, 0))) {
         SDK_RPRINT(test, "TSMimeHdrFieldAppend", "TestCase1|2|3|4|5", TC_FAIL, "TSMimeHdrFieldGet Returns TS_NULL_MLOC");
-        SDK_RPRINT(test, "TSMimeHdrFieldNext", "TestCase1", TC_FAIL,
-                   "Cannot Test TSMimeHdrFieldNext as TSMimeHdrFieldGet Returns TS_NULL_MLOC");
+        SDK_RPRINT(test, "TSMimeHdrFieldNext", "TestCase1", TC_FAIL, "Cannot Test TSMimeHdrFieldNext as TSMimeHdrFieldGet "
+                                                                     "Returns TS_NULL_MLOC");
         SDK_RPRINT(test, "TSMimeHdrFieldGet", "TestCase1", TC_FAIL, "TSMimeHdrFieldGet Returns TS_NULL_MLOC");
       } else {
         if (compare_field_names(test, bufp1, mime_loc1, field_loc11, bufp1, mime_loc1, test_field_loc11) == TS_ERROR) {
@@ -3849,8 +3847,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
           (TSHandleMLocRelease(bufp1, mime_loc1, test_field_loc13) == TS_ERROR) ||
           (TSHandleMLocRelease(bufp1, mime_loc1, test_field_loc14) == TS_ERROR) ||
           (TSHandleMLocRelease(bufp1, mime_loc1, test_field_loc15) == TS_ERROR)) {
-        SDK_RPRINT(test, "TSMimeHdrFieldAppend/Next/Get", "", TC_FAIL,
-                   "Unable to release handle using TSHandleMLocRelease. Can be bad handle.");
+        SDK_RPRINT(test, "TSMimeHdrFieldAppend/Next/Get", "", TC_FAIL, "Unable to release handle using TSHandleMLocRelease. Can be "
+                                                                       "bad handle.");
       }
     }
   } else {
@@ -3858,7 +3856,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
                "Cannot run test as Test for TSMimeHdrFieldNameGet&Set Failed");
   }
 
-
   // TSMimeHdrFieldsCount
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((numberOfFields = TSMimeHdrFieldsCount(bufp1, mime_loc1)) < 0) {
@@ -3891,7 +3888,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
     SDK_RPRINT(test, "TSMimeHdrFieldsCount", "TestCase1", TC_FAIL, "Cannot run Test as TSMimeHdrFieldCreate failed");
   }
 
-  // TSMimeHdrFieldValueStringInsert, TSMimeHdrFieldValueStringGet, TSMimeHdrFieldValueStringSet
+  // TSMimeHdrFieldValueStringInsert, TSMimeHdrFieldValueStringGet,
+  // TSMimeHdrFieldValueStringSet
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((TSMimeHdrFieldValueStringInsert(bufp1, mime_loc1, field_loc11, -1, field1Value2, -1) == TS_ERROR) ||
         (TSMimeHdrFieldValueStringInsert(bufp1, mime_loc1, field_loc11, 0, field1Value1, -1) == TS_ERROR) ||
@@ -3901,9 +3899,11 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       SDK_RPRINT(test, "TSMimeHdrFieldValueStringInsert", "TestCase1|2|3|4|5", TC_FAIL,
                  "TSMimeHdrFieldValueStringInsert Returns TS_ERROR");
       SDK_RPRINT(test, "TSMimeHdrFieldValueStringGet", "TestCase1&2&3&4&5", TC_FAIL,
-                 "Cannot run Test as TSMimeHdrFieldValueStringInsert returns TS_ERROR");
+                 "Cannot run Test as TSMimeHdrFieldValueStringInsert "
+                 "returns TS_ERROR");
       SDK_RPRINT(test, "TSMimeHdrFieldValueStringSet", "TestCase1", TC_FAIL,
-                 "Cannot run Test as TSMimeHdrFieldValueStringInsert returns TS_ERROR");
+                 "Cannot run Test as TSMimeHdrFieldValueStringInsert returns "
+                 "TS_ERROR");
     } else {
       field1Value1Get = TSMimeHdrFieldValueStringGet(bufp1, mime_loc1, field_loc11, 0, &lengthField1Value1);
       field1Value2Get = TSMimeHdrFieldValueStringGet(bufp1, mime_loc1, field_loc11, 1, &lengthField1Value2);
@@ -3942,16 +3942,16 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       } else {
         SDK_RPRINT(test, "TSMimeHdrFieldValueStringInsert", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
         SDK_RPRINT(test, "TSMimeHdrFieldValueStringGet", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
-        SDK_RPRINT(test, "TSMimeHdrFieldValueStringSet", "TestCase1", TC_FAIL,
-                   "TSMimeHdrFieldValueStringSet cannot be tested as TSMimeHdrFieldValueStringInsert|Get failed");
+        SDK_RPRINT(test, "TSMimeHdrFieldValueStringSet", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueStringSet cannot be tested as "
+                                                                               "TSMimeHdrFieldValueStringInsert|Get failed");
       }
     }
   } else {
     SDK_RPRINT(test, "TSMimeHdrFieldValueStringInsert&Set&Get", "All", TC_FAIL, "Cannot run Test as TSMimeHdrFieldCreate failed");
   }
 
-
-  // TSMimeHdrFieldValueDateInsert, TSMimeHdrFieldValueDateGet, TSMimeHdrFieldValueDateSet
+  // TSMimeHdrFieldValueDateInsert, TSMimeHdrFieldValueDateGet,
+  // TSMimeHdrFieldValueDateSet
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if (TSMimeHdrFieldValueDateInsert(bufp1, mime_loc1, field_loc12, field2Value1) == TS_ERROR) {
       SDK_RPRINT(test, "TSMimeHdrFieldValueDateInsert", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueDateInsert Returns TS_ERROR");
@@ -3981,8 +3981,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       } else {
         SDK_RPRINT(test, "TSMimeHdrFieldValueDateInsert", "TestCase1", TC_PASS, "Value's Don't Match");
         SDK_RPRINT(test, "TSMimeHdrFieldValueDateGet", "TestCase1", TC_PASS, "Value's Don't Match");
-        SDK_RPRINT(test, "TSMimeHdrFieldValueDateSet", "TestCase1", TC_FAIL,
-                   "TSMimeHdrFieldValueDateSet cannot be tested as TSMimeHdrFieldValueDateInsert|Get failed");
+        SDK_RPRINT(test, "TSMimeHdrFieldValueDateSet", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueDateSet cannot be tested as "
+                                                                             "TSMimeHdrFieldValueDateInsert|Get failed");
       }
     }
   } else {
@@ -3990,8 +3990,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
                "Cannot run Test as TSMimeHdrFieldCreate failed");
   }
 
-
-  // TSMimeHdrFieldValueIntInsert, TSMimeHdrFieldValueIntGet, TSMimeHdrFieldValueIntSet
+  // TSMimeHdrFieldValueIntInsert, TSMimeHdrFieldValueIntGet,
+  // TSMimeHdrFieldValueIntSet
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((TSMimeHdrFieldValueIntInsert(bufp1, mime_loc1, field_loc13, -1, field3Value2) == TS_ERROR) ||
         (TSMimeHdrFieldValueIntInsert(bufp1, mime_loc1, field_loc13, 0, field3Value1) == TS_ERROR) ||
@@ -4030,15 +4030,16 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       } else {
         SDK_RPRINT(test, "TSMimeHdrFieldValueIntInsert", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
         SDK_RPRINT(test, "TSMimeHdrFieldValueIntGet", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
-        SDK_RPRINT(test, "TSMimeHdrFieldValueIntSet", "TestCase1", TC_FAIL,
-                   "TSMimeHdrFieldValueIntSet cannot be tested as TSMimeHdrFieldValueIntInsert|Get failed");
+        SDK_RPRINT(test, "TSMimeHdrFieldValueIntSet", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueIntSet cannot be tested as "
+                                                                            "TSMimeHdrFieldValueIntInsert|Get failed");
       }
     }
   } else {
     SDK_RPRINT(test, "TSMimeHdrFieldValueIntInsert&Set&Get", "All", TC_FAIL, "Cannot run Test as TSMimeHdrFieldCreate failed");
   }
 
-  // TSMimeHdrFieldValueUintInsert, TSMimeHdrFieldValueUintGet, TSMimeHdrFieldValueUintSet
+  // TSMimeHdrFieldValueUintInsert, TSMimeHdrFieldValueUintGet,
+  // TSMimeHdrFieldValueUintSet
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((TSMimeHdrFieldValueUintInsert(bufp1, mime_loc1, field_loc14, -1, field4Value2) == TS_ERROR) ||
         (TSMimeHdrFieldValueUintInsert(bufp1, mime_loc1, field_loc14, 0, field4Value1) == TS_ERROR) ||
@@ -4077,8 +4078,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       } else {
         SDK_RPRINT(test, "TSMimeHdrFieldValueUintInsert", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
         SDK_RPRINT(test, "TSMimeHdrFieldValueUintGet", "TestCase1|2|3|4|5", TC_PASS, "Value's Don't Match");
-        SDK_RPRINT(test, "TSMimeHdrFieldValueUintSet", "TestCase1", TC_FAIL,
-                   "TSMimeHdrFieldValueUintSet cannot be tested as TSMimeHdrFieldValueUintInsert|Get failed");
+        SDK_RPRINT(test, "TSMimeHdrFieldValueUintSet", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueUintSet cannot be tested as "
+                                                                             "TSMimeHdrFieldValueUintInsert|Get failed");
       }
     }
   } else {
@@ -4098,8 +4099,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
     test_passed_Mime_Hdr_Field_Length_Get = true;
   }
 
-
-  // TSMimeHdrFieldValueAppend, TSMimeHdrFieldValueDelete, TSMimeHdrFieldValuesCount, TSMimeHdrFieldValuesClear
+  // TSMimeHdrFieldValueAppend, TSMimeHdrFieldValueDelete,
+  // TSMimeHdrFieldValuesCount, TSMimeHdrFieldValuesClear
 
   if (test_passed_Mime_Hdr_Field_Create == true) {
     if ((TSMimeHdrFieldValueStringInsert(bufp1, mime_loc1, field_loc15, -1, field5Value1, -1) == TS_ERROR) ||
@@ -4107,13 +4108,17 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
         (TSMimeHdrFieldValueStringInsert(bufp1, mime_loc1, field_loc15, -1, field5Value3, -1) == TS_ERROR) ||
         (TSMimeHdrFieldValueUintInsert(bufp1, mime_loc1, field_loc15, -1, field5Value4) == TS_ERROR)) {
       SDK_RPRINT(test, "TSMimeHdrFieldValueAppend", "TestCase1", TC_FAIL,
-                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. Cannot create field for testing.");
+                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. "
+                 "Cannot create field for testing.");
       SDK_RPRINT(test, "TSMimeHdrFieldValueDelete", "TestCase1", TC_FAIL,
-                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. Cannot create field for testing.");
+                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. "
+                 "Cannot create field for testing.");
       SDK_RPRINT(test, "TSMimeHdrFieldValuesCount", "TestCase1", TC_FAIL,
-                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. Cannot create field for testing.");
+                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. "
+                 "Cannot create field for testing.");
       SDK_RPRINT(test, "TSMimeHdrFieldValuesClear", "TestCase1", TC_FAIL,
-                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. Cannot create field for testing.");
+                 "TSMimeHdrFieldValueString|Int|UintInsert returns TS_ERROR. "
+                 "Cannot create field for testing.");
     } else {
       if (TSMimeHdrFieldValueAppend(bufp1, mime_loc1, field_loc15, 0, field5Value1Append, -1) == TS_ERROR) {
         SDK_RPRINT(test, "TSMimeHdrFieldValueAppend", "TestCase1", TC_FAIL, "TSMimeHdrFieldValueAppend returns TS_ERROR");
@@ -4150,7 +4155,8 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
         if ((strncmp(fieldValueDeleteGet, field5Value3, lengthFieldValueDeleteGet) == 0) &&
             (lengthFieldValueDeleteGet == (int)strlen(field5Value3))) {
           SDK_RPRINT(test, "TSMimeHdrFieldValueDelete", "TestCase1", TC_FAIL,
-                     "Value not deleted from field or incorrect index deleted from field.");
+                     "Value not deleted from field or incorrect index deleted "
+                     "from field.");
         } else {
           SDK_RPRINT(test, "TSMimeHdrFieldValueDelete", "TestCase1", TC_PASS, "ok");
           test_passed_Mime_Hdr_Field_Value_Delete = true;
@@ -4231,10 +4237,12 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
       SDK_RPRINT(test, "TSMimeHdrDestroy", "TestCase1", TC_PASS, "ok");
       test_passed_Mime_Hdr_Destroy = true;
     }
-    /** Commented out as Traffic Server was crashing. Will have to look into it. */
+    /** Commented out as Traffic Server was crashing. Will have to look into it.
+     */
     /*
        if (TSHandleMLocRelease(bufp1,TS_NULL_MLOC,mime_loc1)==TS_ERROR) {
-       SDK_RPRINT(test,"TSHandleMLocRelease","TSMimeHdrDestroy",TC_FAIL,"unable to release handle using TSHandleMLocRelease");
+       SDK_RPRINT(test,"TSHandleMLocRelease","TSMimeHdrDestroy",TC_FAIL,
+                  "unable to release handle using TSHandleMLocRelease");
        }
      */
   } else {
@@ -4254,7 +4262,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
     SDK_RPRINT(test, "TSMimeHdrDestroy", "TestCase1", TC_FAIL, "Cannot run test as TSMimeHdrCreate failed");
   }
 
-
   if ((test_passed_MBuffer_Create == true) && (test_passed_Mime_Hdr_Create == true) &&
       (test_passed_Mime_Hdr_Field_Create == true) && (test_passed_Mime_Hdr_Field_Name == true) &&
       (test_passed_Mime_Hdr_Field_Append == true) && (test_passed_Mime_Hdr_Field_Get == true) &&
@@ -4277,7 +4284,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrField)(RegressionTest *test, int /* atype ATS_U
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSHttpHdrParse
 //
@@ -4365,9 +4371,13 @@ convert_http_hdr_to_string(TSMBuffer bufp, TSMLoc hdr_loc)
 
 REGRESSION_TEST(SDK_API_TSHttpHdrParse)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
-  const char *req =
-    "GET http://www.example.com/ HTTP/1.1\r\nmimefield1:field1value1,field1value2\r\nmimefield2:field2value1,field2value2\r\n\r\n";
-  const char *resp = "HTTP/1.1 200 OK\r\n1mimefield:1field1value,1field2value\r\n2mimefield:2field1value,2field2value\r\n\r\n";
+  const char *req = "GET http://www.example.com/ HTTP/1.1\r\n"
+                    "mimefield1:field1value1,field1value2\r\n"
+                    "mimefield2:field2value1,field2value2\r\n"
+                    "\r\n";
+  const char *resp = "HTTP/1.1 200 OK\r\n"
+                     "1mimefield:1field1value,1field2value\r\n"
+                     "2mimefield:2field1value,2field2value\r\n\r\n";
   const char *start;
   const char *end;
   char *temp;
@@ -4476,7 +4486,6 @@ REGRESSION_TEST(SDK_API_TSHttpHdrParse)(RegressionTest *test, int /* atype ATS_U
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSMimeHdrParse
 //
@@ -4604,11 +4613,11 @@ compare_field_values(RegressionTest *test, TSMBuffer bufp1, TSMLoc hdr_loc1, TSM
   return TS_SUCCESS;
 }
 
-
 REGRESSION_TEST(SDK_API_TSMimeHdrParse)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
-  const char *parse_string =
-    "field1:field1Value1,field1Value2\r\nfield2:10,-34,45\r\nfield3:field3Value1,23\r\nfield2: 2345, field2Value2\r\n\r\n";
+  const char *parse_string = "field1:field1Value1,field1Value2\r\n"
+                             "field2:10,-34,45\r\nfield3:field3Value1,23\r\n"
+                             "field2: 2345, field2Value2\r\n\r\n";
   const char *DUPLICATE_FIELD_NAME = "field2";
   const char *REMOVE_FIELD_NAME = "field3";
 
@@ -4708,7 +4717,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrParse)(RegressionTest *test, int /* atype ATS_U
     SDK_RPRINT(test, "TSMimeHdrLengthGet", "TestCase1", TC_FAIL, "Cannot run test as unable to create a parser");
   }
 
-
   // HOW DO I CHECK FOR PARSER CLEAR????
   if (test_passed_parser_create == true) {
     TSMimeParserClear(parser);
@@ -4718,7 +4726,6 @@ REGRESSION_TEST(SDK_API_TSMimeHdrParse)(RegressionTest *test, int /* atype ATS_U
     SDK_RPRINT(test, "TSMimeParserClear", "TestCase1", TC_FAIL, "Cannot run test as unable to create a parser");
   }
 
-
   if (test_passed_parser_create == true) {
     TSMimeParserDestroy(parser);
     SDK_RPRINT(test, "TSMimeParserDestroy", "TestCase1", TC_PASS, "ok");
@@ -5028,15 +5035,16 @@ REGRESSION_TEST(SDK_API_TSUrlParse)(RegressionTest *test, int /* atype ATS_UNUSE
     "http://www.example.com/homepage.cgi;ab?abc=def#abc", "http://abc:def@www.example.com:3426/homepage.cgi;ab?abc=def#abc",
     "https://abc:def@www.example.com:3426/homepage.cgi;ab?abc=def#abc",
     "ftp://abc:def@www.example.com:3426/homepage.cgi;ab?abc=def#abc",
-    "file:///c:/test.dat;ab?abc=def#abc", // Note: file://c: is malformed URL because no host is present.
-    "file:///test.dat;ab?abc=def#abc", "foo://bar.com/baz/",
-    "http://a.b.com/xx.jpg?newpath=http://b.c.com" // https://issues.apache.org/jira/browse/TS-1635
+    "file:///c:/test.dat;ab?abc=def#abc", // Note: file://c: is a malformed
+                                          // URL because no host is present.
+    "file:///test.dat;ab?abc=def#abc", "foo://bar.com/baz/",
+    "http://a.b.com/xx.jpg?newpath=http://b.c.com"
+    // https://issues.apache.org/jira/browse/TS-1635
   };
 
   static int const num_urls = sizeof(urls) / sizeof(urls[0]);
   bool test_passed[num_urls] = {false};
 
-
   const char *start;
   const char *end;
   char *temp;
@@ -5049,7 +5057,6 @@ REGRESSION_TEST(SDK_API_TSUrlParse)(RegressionTest *test, int /* atype ATS_UNUSE
 
   *pstatus = REGRESSION_TEST_INPROGRESS;
 
-
   int idx;
   for (idx = 0; idx < num_urls; idx++) {
     char const *url = urls[idx];
@@ -5096,7 +5103,6 @@ REGRESSION_TEST(SDK_API_TSUrlParse)(RegressionTest *test, int /* atype ATS_UNUSE
     *pstatus = REGRESSION_TEST_PASSED;
   }
 
-
   return;
 }
 
@@ -5118,7 +5124,6 @@ typedef struct {
   TSTextLogObject log;
 } LogTestData;
 
-
 static int
 log_test_handler(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */)
 {
@@ -5167,7 +5172,6 @@ log_test_handler(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */)
   *(data->pstatus) = REGRESSION_TEST_PASSED;
   SDK_RPRINT(data->test, "TSTextLogObject", "TestCase1", TC_PASS, "ok");
 
-
  // figure out the metainfo file for cleanup.
   // code from MetaInfo::_build_name(const char *filename)
   int i = -1, l = 0;
@@ -5213,7 +5217,8 @@ REGRESSION_TEST(SDK_API_TSTextLog)(RegressionTest *test, int /* atype ATS_UNUSED
   char logname[PATH_NAME_MAX];
   char fullpath_logname[PATH_NAME_MAX];
 
-  /* Generate a random log file name, so if we run the test several times, we won't use the
+  /* Generate a random log file name, so if we run the test several times, we
+     won't use the
      same log file name. */
   ats_scoped_str tmp(RecConfigReadLogDir());
   snprintf(logname, sizeof(logname), "RegressionTestLog%d.log", (int)getpid());
@@ -5254,7 +5259,6 @@ REGRESSION_TEST(SDK_API_TSTextLog)(RegressionTest *test, int /* atype ATS_UNUSED
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSMgmtGet
 //
@@ -5315,9 +5319,9 @@ REGRESSION_TEST(SDK_API_TSMgmtGet)(RegressionTest *test, int /* atype ATS_UNUSED
     SDK_RPRINT(test, "TSMgmtStringGet", "TestCase1.4", TC_FAIL, "can not get value of param %s", CONFIG_PARAM_STRING_NAME);
     err = 1;
   } else if (strcmp(svalue, CONFIG_PARAM_STRING_VALUE) != 0) {
-    SDK_RPRINT(test, "TSMgmtStringGet", "TestCase1.4", TC_FAIL,
-               "got incorrect value of param %s, should have been \"%s\", found \"%s\"", CONFIG_PARAM_STRING_NAME,
-               CONFIG_PARAM_STRING_VALUE, svalue);
+    SDK_RPRINT(test, "TSMgmtStringGet", "TestCase1.4", TC_FAIL, "got incorrect value of param %s, should have been \"%s\", "
+                                                                "found \"%s\"",
+               CONFIG_PARAM_STRING_NAME, CONFIG_PARAM_STRING_VALUE, svalue);
     err = 1;
   } else {
     SDK_RPRINT(test, "TSMgmtStringGet", "TestCase1.4", TC_PASS, "ok");
@@ -5333,7 +5337,6 @@ REGRESSION_TEST(SDK_API_TSMgmtGet)(RegressionTest *test, int /* atype ATS_UNUSED
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSConstant
 //
@@ -5349,7 +5352,6 @@ REGRESSION_TEST(SDK_API_TSMgmtGet)(RegressionTest *test, int /* atype ATS_UNUSED
     }                                                                                                                  \
   }
 
-
 typedef enum {
   ORIG_TS_PARSE_ERROR = -1,
   ORIG_TS_PARSE_DONE = 0,
@@ -5365,10 +5367,8 @@ typedef enum {
 
 typedef enum {
   ORIG_TS_HTTP_STATUS_NONE = 0,
-
   ORIG_TS_HTTP_STATUS_CONTINUE = 100,
   ORIG_TS_HTTP_STATUS_SWITCHING_PROTOCOL = 101,
-
   ORIG_TS_HTTP_STATUS_OK = 200,
   ORIG_TS_HTTP_STATUS_CREATED = 201,
   ORIG_TS_HTTP_STATUS_ACCEPTED = 202,
@@ -5376,14 +5376,12 @@ typedef enum {
   ORIG_TS_HTTP_STATUS_NO_CONTENT = 204,
   ORIG_TS_HTTP_STATUS_RESET_CONTENT = 205,
   ORIG_TS_HTTP_STATUS_PARTIAL_CONTENT = 206,
-
   ORIG_TS_HTTP_STATUS_MULTIPLE_CHOICES = 300,
   ORIG_TS_HTTP_STATUS_MOVED_PERMANENTLY = 301,
   ORIG_TS_HTTP_STATUS_MOVED_TEMPORARILY = 302,
   ORIG_TS_HTTP_STATUS_SEE_OTHER = 303,
   ORIG_TS_HTTP_STATUS_NOT_MODIFIED = 304,
   ORIG_TS_HTTP_STATUS_USE_PROXY = 305,
-
   ORIG_TS_HTTP_STATUS_BAD_REQUEST = 400,
   ORIG_TS_HTTP_STATUS_UNAUTHORIZED = 401,
   ORIG_TS_HTTP_STATUS_PAYMENT_REQUIRED = 402,
@@ -5400,7 +5398,6 @@ typedef enum {
   ORIG_TS_HTTP_STATUS_REQUEST_ENTITY_TOO_LARGE = 413,
   ORIG_TS_HTTP_STATUS_REQUEST_URI_TOO_LONG = 414,
   ORIG_TS_HTTP_STATUS_UNSUPPORTED_MEDIA_TYPE = 415,
-
   ORIG_TS_HTTP_STATUS_INTERNAL_SERVER_ERROR = 500,
   ORIG_TS_HTTP_STATUS_NOT_IMPLEMENTED = 501,
   ORIG_TS_HTTP_STATUS_BAD_GATEWAY = 502,
@@ -5440,20 +5437,16 @@ typedef enum {
   ORIG_TS_EVENT_TIMEOUT = 2,
   ORIG_TS_EVENT_ERROR = 3,
   ORIG_TS_EVENT_CONTINUE = 4,
-
   ORIG_TS_EVENT_VCONN_READ_READY = 100,
   ORIG_TS_EVENT_VCONN_WRITE_READY = 101,
   ORIG_TS_EVENT_VCONN_READ_COMPLETE = 102,
   ORIG_TS_EVENT_VCONN_WRITE_COMPLETE = 103,
   ORIG_TS_EVENT_VCONN_EOS = 104,
-
   ORIG_TS_EVENT_NET_CONNECT = 200,
   ORIG_TS_EVENT_NET_CONNECT_FAILED = 201,
   ORIG_TS_EVENT_NET_ACCEPT = 202,
   ORIG_TS_EVENT_NET_ACCEPT_FAILED = 204,
-
   ORIG_TS_EVENT_HOST_LOOKUP = 500,
-
   ORIG_TS_EVENT_CACHE_OPEN_READ = 1102,
   ORIG_TS_EVENT_CACHE_OPEN_READ_FAILED = 1103,
   ORIG_TS_EVENT_CACHE_OPEN_WRITE = 1108,
@@ -5466,7 +5459,6 @@ typedef enum {
   ORIG_TS_EVENT_CACHE_SCAN_OPERATION_BLOCKED = 1123,
   ORIG_TS_EVENT_CACHE_SCAN_OPERATION_FAILED = 1124,
   ORIG_TS_EVENT_CACHE_SCAN_DONE = 1125,
-
   ORIG_TS_EVENT_HTTP_CONTINUE = 60000,
   ORIG_TS_EVENT_HTTP_ERROR = 60001,
   ORIG_TS_EVENT_HTTP_READ_REQUEST_HDR = 60002,
@@ -5483,7 +5475,6 @@ typedef enum {
   ORIG_TS_EVENT_HTTP_SSN_START = 60013,
   ORIG_TS_EVENT_HTTP_SSN_CLOSE = 60014,
   ORIG_TS_EVENT_HTTP_CACHE_LOOKUP_COMPLETE = 60015,
-
   ORIG_TS_EVENT_MGMT_UPDATE = 60100
 } ORIG_TSEvent;
 
@@ -5524,7 +5515,6 @@ typedef enum {
   ORIG_TS_SUCCESS = 0,
 } ORIG_TSReturnCode;
 
-
 REGRESSION_TEST(SDK_API_TSConstant)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -5541,7 +5531,6 @@ REGRESSION_TEST(SDK_API_TSConstant)(RegressionTest *test, int /* atype ATS_UNUSE
   PRINT_DIFF(TS_HTTP_STATUS_OK);
   PRINT_DIFF(TS_HTTP_STATUS_CREATED);
 
-
   PRINT_DIFF(TS_HTTP_STATUS_ACCEPTED);
   PRINT_DIFF(TS_HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION);
   PRINT_DIFF(TS_HTTP_STATUS_NO_CONTENT);
@@ -5665,7 +5654,6 @@ REGRESSION_TEST(SDK_API_TSConstant)(RegressionTest *test, int /* atype ATS_UNUSE
   PRINT_DIFF(TS_ERROR);
   PRINT_DIFF(TS_SUCCESS);
 
-
   if (test_passed) {
     *pstatus = REGRESSION_TEST_PASSED;
   } else {
@@ -5684,7 +5672,6 @@ REGRESSION_TEST(SDK_API_TSConstant)(RegressionTest *test, int /* atype ATS_UNUSE
 //                    TSHttpTxnParentProxySet
 //////////////////////////////////////////////
 
-
 typedef struct {
   RegressionTest *test;
   int *pstatus;
@@ -5727,7 +5714,6 @@ checkHttpTxnParentProxy(ContData *data, TSHttpTxn txnp)
   return TS_EVENT_CONTINUE;
 }
 
-
 static int
 ssn_handler(TSCont contp, TSEvent event, void *edata)
 {
@@ -5856,7 +5842,6 @@ ssn_handler(TSCont contp, TSEvent event, void *edata)
   return 0;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpSsn)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -6018,9 +6003,7 @@ cache_hook_handler(TSCont contp, TSEvent event, void *edata)
     }
 
     TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
-  }
-
-  break;
+  } break;
 
   case TS_EVENT_IMMEDIATE:
   case TS_EVENT_TIMEOUT:
@@ -6039,7 +6022,8 @@ cache_hook_handler(TSCont contp, TSEvent event, void *edata)
 
     /* Browser got the response. test is over. clean up */
     {
-      /* If this is the first time, then the response is in cache and we should make */
+      /* If this is the first time, then the response is in cache and we
+       * should make */
       /* another request to get cache hit */
       if (data->first_time == true) {
         data->first_time = false;
@@ -6081,7 +6065,6 @@ cache_hook_handler(TSCont contp, TSEvent event, void *edata)
   return 0;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnCache)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -6378,7 +6361,8 @@ transformable(TSHttpTxn txnp, TransformTestData *data)
   // XXX - Can't return TS_ERROR because that is a different type
   // -bcall 7/24/07
   //     if (resp_status == TS_ERROR) {
-  //      SDK_RPRINT(data->test,"TSHttpTxnTransform","",TC_FAIL,"[transformable]: TSHttpHdrStatusGet returns TS_ERROR");
+  //      SDK_RPRINT(data->test,"TSHttpTxnTransform","",TC_FAIL,
+  //                 "[transformable]: TSHttpHdrStatusGet returns TS_ERROR");
   //     }
 
   return 0; /* not a 200 */
@@ -6456,7 +6440,8 @@ transform_hook_handler(TSCont contp, TSEvent event, void *edata)
     if (transformable(txnp, data)) {
       transform_add(txnp, data);
     }
-    /* Call TransformedRespCache or UntransformedRespCache depending on request */
+    /* Call TransformedRespCache or UntransformedRespCache depending on request
+     */
     {
       TSMBuffer bufp;
       TSMLoc hdr;
@@ -6644,7 +6629,6 @@ transform_hook_handler(TSCont contp, TSEvent event, void *edata)
   return 0;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnTransform)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -6809,9 +6793,7 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
 
     TSHttpAltInfoQualitySet(infop, 0.5);
     SDK_RPRINT(data->test, "TSHttpAltInfoQualitySet", "TestCase", TC_PASS, "ok");
-  }
-
-  break;
+  } break;
 
   case TS_EVENT_IMMEDIATE:
   case TS_EVENT_TIMEOUT:
@@ -6830,7 +6812,8 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
 
     /* Browser got the response. test is over. clean up */
     {
-      /* If this is the first time, then both the responses are in cache and we should make */
+      /* If this is the first time, then both the responses are in cache and we
+       * should make */
       /* another request to get cache hit */
       if (data->first_time == true) {
         data->first_time = false;
@@ -6840,7 +6823,8 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
         /* Send another similar client request */
         synclient_txn_send_request(data->browser3, data->request3);
 
-        /* Register to HTTP hooks that are called in case of alternate selection */
+        /* Register to HTTP hooks that are called in case of alternate selection
+         */
         TSHttpHookAdd(TS_HTTP_SELECT_ALT_HOOK, contp);
         TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
         return 0;
@@ -6881,7 +6865,6 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
   return 0;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpAltInfo)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -6927,7 +6910,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpAltInfo)(RegressionTest *test, int /* atyp
   return;
 }
 
-
 //////////////////////////////////////////////
 //       SDK_API_TSHttpConnect
 //
@@ -6941,7 +6923,8 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpAltInfo)(RegressionTest *test, int /* atyp
 //
 // Same test strategy:
 //  - create a synthetic server listening on port A
-//  - use HttpConnect to send a request to TS for an url on a remote host H, port B
+//  - use HttpConnect to send a request to TS for a URL on a remote host H,
+//    port B
 //  - use TxnIntercept or TxnServerIntercept to forward the request
 //    to the synthetic server on local host, port A
 //  - make sure response is correct
@@ -6967,7 +6950,6 @@ typedef struct {
   unsigned long magic;
 } ConnectTestData;
 
-
 static int
 cont_test_handler(TSCont contp, TSEvent event, void *edata)
 {
@@ -7069,7 +7051,6 @@ done:
   return TS_EVENT_IMMEDIATE;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectIntercept)(RegressionTest *test, int /* atype */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -7111,7 +7092,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectIntercept)(RegressionTest *test,
   return;
 }
 
-
 EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectServerIntercept)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -7130,7 +7110,8 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectServerIntercept)(RegressionTest *
   /* Register to hook READ_REQUEST */
   TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, cont_test);
 
-  /* This is cool ! we can use the code written for the synthetic server and client in InkAPITest.cc */
+  /* This is cool ! we can use the code written for the synthetic server and
+   * client in InkAPITest.cc */
   data->os = synserver_create(SYNSERVER_DUMMY_PORT);
 
   data->browser = synclient_txn_create();
@@ -7151,7 +7132,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectServerIntercept)(RegressionTest *
   return;
 }
 
-
 ////////////////////////////////////////////////
 // SDK_API_OVERRIDABLE_CONFIGS
 //
@@ -7221,7 +7201,6 @@ REGRESSION_TEST(SDK_API_OVERRIDABLE_CONFIGS)(RegressionTest *test, int /* atype
   const char *test_string = "The Apache Traffic Server";
   int len;
 
-
   s->init();
 
   *pstatus = REGRESSION_TEST_INPROGRESS;
@@ -7303,11 +7282,15 @@ REGRESSION_TEST(SDK_API_OVERRIDABLE_CONFIGS)(RegressionTest *test, int /* atype
 
 REGRESSION_TEST(SDK_API_ENCODING)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus)
 {
-  const char *url = "http://www.example.com/foo?fie= \"#%<>[]\\^`{}~&bar={test}&fum=Apache Traffic Server";
-  const char *url_encoded =
-    "http://www.example.com/foo?fie=%20%22%23%25%3C%3E%5B%5D%5C%5E%60%7B%7D%7E&bar=%7Btest%7D&fum=Apache%20Traffic%20Server";
-  const char *url_base64 =
-    "aHR0cDovL3d3dy5leGFtcGxlLmNvbS9mb28/ZmllPSAiIyU8PltdXF5ge31+JmJhcj17dGVzdH0mZnVtPUFwYWNoZSBUcmFmZmljIFNlcnZlcg==";
+  const char *url = "http://www.example.com/foo?fie= "
+                    "\"#%<>[]\\^`{}~&bar={test}&fum=Apache Traffic Server";
+  const char *url_encoded = "http://www.example.com/foo"
+                            "?fie=%20%22%23%25%3C%3E%5B%5D%5C%5E%60%7B%7D%7E"
+                            "&bar=%7Btest%7D&fum=Apache%20Traffic%20Server";
+  const char *url_base64 = "aHR0cDovL3d3dy5leGFtcGxlLmNvbS9mb28/"
+                           "ZmllPSAiIyU8PltdXF5ge31+"
+                           "JmJhcj17dGVzdH0mZnVt"
+                           "PUFwYWNoZSBUcmFmZmljIFNlcnZlcg==";
   const char *url2 = "http://www.example.com/"; // No Percent encoding necessary
   char buf[1024];
   size_t length;
@@ -7390,7 +7373,6 @@ REGRESSION_TEST(SDK_API_ENCODING)(RegressionTest *test, int /* atype ATS_UNUSED
   return;
 }
 
-
 ////////////////////////////////////////////////
 // SDK_API_DEBUG_NAME_LOOKUPS
 //
@@ -7418,7 +7400,6 @@ REGRESSION_TEST(SDK_API_DEBUG_NAME_LOOKUPS)(RegressionTest *test, int /* atype A
     SDK_RPRINT(test, "TSHttpServerStateNameLookup", "TestCase1", TC_PASS, "ok");
   }
 
-
   str = TSHttpHookNameLookup(TS_HTTP_READ_RESPONSE_HDR_HOOK);
   if ((strlen(str) != strlen(hook_name) || strcmp(str, hook_name))) {
     SDK_RPRINT(test, "TSHttpHookNameLookup", "TestCase1", TC_FAIL, "Failed on %d, expected %s, got %s",
@@ -7428,7 +7409,6 @@ REGRESSION_TEST(SDK_API_DEBUG_NAME_LOOKUPS)(RegressionTest *test, int /* atype A
     SDK_RPRINT(test, "TSHttpHookNameLookup", "TestCase1", TC_PASS, "ok");
   }
 
-
   str = TSHttpEventNameLookup(TS_EVENT_IMMEDIATE);
   if ((strlen(str) != strlen(event_name) || strcmp(str, event_name))) {
     SDK_RPRINT(test, "TSHttpEventNameLookup", "TestCase1", TC_FAIL, "Failed on %d, expected %s, got %s", TS_EVENT_IMMEDIATE,
@@ -7438,7 +7418,6 @@ REGRESSION_TEST(SDK_API_DEBUG_NAME_LOOKUPS)(RegressionTest *test, int /* atype A
     SDK_RPRINT(test, "TSHttpEventNameLookup", "TestCase1", TC_PASS, "ok");
   }
 
-
   *pstatus = success ? REGRESSION_TEST_PASSED : REGRESSION_TEST_FAILED;
 
   return;

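The "Same test strategy" comment in the SDK_API_TSHttpConnect tests above
describes diverting a transaction to a synthetic server instead of a real
origin. For readers skimming the diff, here is a minimal sketch of that
intercept pattern, assuming only the public C API declared in ts/ts.h; the
handler name is hypothetical, and the hook registration and the continuation
that implements the synthetic server are left out.

#include <ts/ts.h>

// Hypothetical handler; a real plugin would register this continuation on
// TS_HTTP_READ_REQUEST_HDR_HOOK and supply a continuation that plays the
// synthetic-server role.
static int
intercept_request(TSCont contp, TSEvent event, void *edata)
{
  if (event == TS_EVENT_HTTP_READ_REQUEST_HDR) {
    TSHttpTxn txnp = static_cast<TSHttpTxn>(edata);

    // Divert the transaction to this continuation instead of contacting the
    // origin; the continuation later receives TS_EVENT_NET_ACCEPT with a
    // TSVConn on which it can read the request and write a canned response.
    TSHttpTxnIntercept(contp, txnp);
    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
  }
  return 0;
}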
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/InkAPITestTool.cc
----------------------------------------------------------------------
diff --git a/proxy/InkAPITestTool.cc b/proxy/InkAPITestTool.cc
index b5e6707..5328b5e 100644
--- a/proxy/InkAPITestTool.cc
+++ b/proxy/InkAPITestTool.cc
@@ -125,7 +125,6 @@ typedef struct {
   unsigned int magic;
 } ClientTxn;
 
-
 //////////////////////////////////////////////////////////////////////////////
 // DECLARATIONS
 //////////////////////////////////////////////////////////////////////////////
@@ -136,7 +135,6 @@ static char *generate_request(int test_case);
 static char *generate_response(const char *request);
 static int get_request_id(TSHttpTxn txnp);
 
-
 /* client side */
 static ClientTxn *synclient_txn_create(void);
 static int synclient_txn_delete(ClientTxn *txn);
@@ -174,7 +172,6 @@ get_body_ptr(const char *request)
   return (ptr != NULL) ? (ptr + 4) : NULL;
 }
 
-
 /* Caller must free returned request */
 static char *
 generate_request(int test_case)
@@ -276,13 +273,13 @@ generate_request(int test_case)
   return request;
 }
 
-
 /* Caller must free returned response */
 static char *
 generate_response(const char *request)
 {
 // define format for response
-// Each response contains a field X-Response-ID that contains the id of the testcase
+// Each response contains a field X-Response-ID that contains the id of the
+// testcase
 #define HTTP_REQUEST_TESTCASE_FORMAT \
   "GET %1024s HTTP/1.%d\r\n"         \
   "X-Request-ID: %d\r\n"
@@ -360,7 +357,6 @@ generate_response(const char *request)
   "\r\n"                             \
   "Body for response 10"
 
-
   int test_case, match, http_version;
 
   char *response = (char *)TSmalloc(RESPONSE_MAX_SIZE + 1);
@@ -409,7 +405,6 @@ generate_response(const char *request)
   return response;
 }
 
-
 // This routine can be called by tests, from the READ_REQUEST_HDR_HOOK
 // to figure out the id of a test message
 // Returns id/-1 in case of error
@@ -437,7 +432,6 @@ get_request_id(TSHttpTxn txnp)
   return id;
 }
 
-
 //////////////////////////////////////////////////////////////////////////////
 // SOCKET CLIENT
 //////////////////////////////////////////////////////////////////////////////
@@ -540,7 +534,6 @@ synclient_txn_send_request_to_vc(ClientTxn *txn, char *request, TSVConn vc)
   return 1;
 }
 
-
 static int
 synclient_txn_read_response(TSCont contp)
 {
@@ -616,7 +609,6 @@ synclient_txn_read_response_handler(TSCont contp, TSEvent event, void * /* data
   return 1;
 }
 
-
 static int
 synclient_txn_write_request(TSCont contp)
 {
@@ -689,7 +681,6 @@ synclient_txn_write_request_handler(TSCont contp, TSEvent event, void * /* data
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synclient_txn_connect_handler(TSCont contp, TSEvent event, void *data)
 {
@@ -729,7 +720,6 @@ synclient_txn_connect_handler(TSCont contp, TSEvent event, void *data)
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synclient_txn_main_handler(TSCont contp, TSEvent event, void *data)
 {
@@ -740,7 +730,6 @@ synclient_txn_main_handler(TSCont contp, TSEvent event, void *data)
   return (*handler)(contp, event, data);
 }
 
-
 //////////////////////////////////////////////////////////////////////////////
 // SOCKET SERVER
 //////////////////////////////////////////////////////////////////////////////
@@ -840,7 +829,6 @@ synserver_accept_handler(TSCont contp, TSEvent event, void *data)
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synserver_txn_close(TSCont contp)
 {
@@ -865,7 +853,6 @@ synserver_txn_close(TSCont contp)
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synserver_txn_write_response(TSCont contp)
 {
@@ -904,7 +891,6 @@ synserver_txn_write_response(TSCont contp)
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synserver_txn_write_response_handler(TSCont contp, TSEvent event, void * /* data ATS_UNUSED */)
 {
@@ -940,7 +926,6 @@ synserver_txn_write_response_handler(TSCont contp, TSEvent event, void * /* data
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synserver_txn_read_request(TSCont contp)
 {
@@ -1019,7 +1004,6 @@ synserver_txn_read_request_handler(TSCont contp, TSEvent event, void * /* data A
   return TS_EVENT_IMMEDIATE;
 }
 
-
 static int
 synserver_txn_main_handler(TSCont contp, TSEvent event, void *data)
 {

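The synthetic client and server in InkAPITestTool.cc pair requests and
responses by number: each generated request carries an X-Request-ID header and
each response carries a matching X-Response-ID, which is how get_request_id()
ties a transaction back to its test case. A standalone sketch of that parsing
step, using only the standard library (the helper name parse_request_id is
illustrative, not part of the file):

#include <cstdio>
#include <cstring>

// Recover the test-case id from the synthetic request format described above
// ("X-Request-ID: <n>"); returns -1 on error, mirroring get_request_id().
static int
parse_request_id(const char *request)
{
  const char *field = std::strstr(request, "X-Request-ID:");
  int id = -1;

  if (field != NULL && std::sscanf(field, "X-Request-ID: %d", &id) == 1) {
    return id;
  }
  return -1;
}

int
main()
{
  const char req[] = "GET /test HTTP/1.0\r\nX-Request-ID: 9\r\n\r\n";
  std::printf("request id = %d\n", parse_request_id(req)); // prints "request id = 9"
  return 0;
}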
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/InkIOCoreAPI.cc
----------------------------------------------------------------------
diff --git a/proxy/InkIOCoreAPI.cc b/proxy/InkIOCoreAPI.cc
index 58492d3..bb5676f 100644
--- a/proxy/InkIOCoreAPI.cc
+++ b/proxy/InkIOCoreAPI.cc
@@ -46,7 +46,6 @@
 #define sdk_assert(EX) ((void)((EX) ? (void)0 : _TSReleaseAssert(#EX, __FILE__, __LINE__)))
 #endif
 
-
 TSReturnCode
 sdk_sanity_check_mutex(TSMutex mutex)
 {
@@ -63,7 +62,6 @@ sdk_sanity_check_mutex(TSMutex mutex)
   return TS_SUCCESS;
 }
 
-
 TSReturnCode
 sdk_sanity_check_hostlookup_structure(TSHostLookupResult data)
 {
@@ -86,7 +84,6 @@ sdk_sanity_check_iocore_structure(void *data)
 TSReturnCode sdk_sanity_check_continuation(TSCont cont);
 TSReturnCode sdk_sanity_check_null_ptr(void *ptr);
 
-
 ////////////////////////////////////////////////////////////////////
 //
 // Threads
@@ -168,7 +165,6 @@ TSThreadSelf(void)
   return ithread;
 }
 
-
 ////////////////////////////////////////////////////////////////////
 //
 // Mutexes
@@ -229,7 +225,6 @@ TSMutexLock(TSMutex mutexp)
   MUTEX_TAKE_LOCK((ProxyMutex *)mutexp, this_ethread());
 }
 
-
 TSReturnCode
 TSMutexLockTry(TSMutex mutexp)
 {
@@ -401,7 +396,6 @@ INKUDPSendTo(TSCont contp, INKUDPConn udp, unsigned int ip, int port, char *data
   return reinterpret_cast<TSAction>(conn->send((Continuation *)contp, packet));
 }
 
-
 TSAction
 INKUDPRecvFrom(TSCont contp, INKUDPConn udp)
 {
@@ -488,7 +482,6 @@ INKUDPPacketGet(INKUDPacketQueue queuep)
   return NULL;
 }
 
-
 /* Buffers */
 
 TSIOBuffer

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Main.h
----------------------------------------------------------------------
diff --git a/proxy/Main.h b/proxy/Main.h
index b1f3c9b..95a68a6 100644
--- a/proxy/Main.h
+++ b/proxy/Main.h
@@ -28,7 +28,6 @@
 #include "Regression.h"
 #include "I_Version.h"
 
-
 //
 // Constants
 //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ParentSelection.cc
----------------------------------------------------------------------
diff --git a/proxy/ParentSelection.cc b/proxy/ParentSelection.cc
index cd30180..12862f9 100644
--- a/proxy/ParentSelection.cc
+++ b/proxy/ParentSelection.cc
@@ -305,7 +305,6 @@ ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result)
   }
 }
 
-
 void
 ParentConfigParams::recordRetrySuccess(ParentResult *result)
 {
@@ -1016,7 +1015,6 @@ setup_socks_servers(ParentRecord *rec_arr, int len)
   return 0;
 }
 
-
 void
 SocksServerConfig::reconfigure()
 {
@@ -1107,7 +1105,6 @@ request_to_data(HttpRequestData *req, sockaddr const *srcip, sockaddr const *dst
   http_parser_clear(&parser);
 }
 
-
 static int passes;
 static int fails;
 
@@ -1154,7 +1151,8 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */,
   // Test 1
   tbl[0] = '\0';
   ST(1)
-  T("dest_domain=. parent=red:37412,orange:37412,yellow:37412 round_robin=strict\n")
+  T("dest_domain=. parent=red:37412,orange:37412,yellow:37412 "
+    "round_robin=strict\n")
   REBUILD int c, red = 0, orange = 0, yellow = 0;
   for (c = 0; c < 21; c++) {
     REINIT br(request, "fruit_basket.net");
@@ -1166,7 +1164,8 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */,
   // Test 2
   ST(2)
   tbl[0] = '\0';
-  T("dest_domain=. parent=green:4325,blue:4325,indigo:4325,violet:4325 round_robin=false\n")
+  T("dest_domain=. parent=green:4325,blue:4325,indigo:4325,violet:4325 "
+    "round_robin=false\n")
   REBUILD int g = 0, b = 0, i = 0, v = 0;
   for (c = 0; c < 17; c++) {
     REINIT br(request, "fruit_basket.net");
@@ -1182,23 +1181,26 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */,
   tbl[0] = '\0';
 #define TEST_IP4_ADDR "209.131.62.14"
 #define TEST_IP6_ADDR "BEEF:DEAD:ABBA:CAFE:1337:1E1F:5EED:C0FF"
-  T("dest_ip=" TEST_IP4_ADDR " parent=cat:37,dog:24 round_robin=strict\n")             /* L1 */
-  T("dest_ip=" TEST_IP6_ADDR " parent=zwoop:37,jMCg:24 round_robin=strict\n")          /* L1 */
-  T("dest_host=www.pilot.net parent=pilot_net:80\n")                                   /* L2 */
-  T("url_regex=snoopy parent=odie:80,garfield:80 round_robin=true\n")                  /* L3 */
-  T("dest_domain=i.am parent=amy:80,katie:80,carissa:771 round_robin=false\n")         /* L4 */
-  T("dest_domain=microsoft.net time=03:00-22:10 parent=zoo.net:341\n")                 /* L5 */
-  T("dest_domain=microsoft.net time=0:00-02:59 parent=zoo.net:347\n")                  /* L6 */
-  T("dest_domain=microsoft.net time=22:11-23:59 parent=zoo.edu:111\n")                 /* L7 */
-  T("dest_domain=imac.net port=819 parent=genie:80 round_robin=strict\n")              /* L8 */
-  T("dest_ip=172.34.61.211 port=3142 parent=orangina:80 go_direct=false\n")            /* L9 */
-  T("url_regex=miffy prefix=furry/rabbit parent=nintje:80 go_direct=false\n")          /* L10 */
-  T("url_regex=kitty suffix=tif parent=hello:80 round_robin=strict go_direct=false\n") /* L11 */
-  T("url_regex=cyclops method=get parent=turkey:80\n")                                 /* L12 */
-  T("url_regex=cyclops method=post parent=club:80\n")                                  /* L13 */
-  T("url_regex=cyclops method=put parent=sandwich:80\n")                               /* L14 */
-  T("url_regex=cyclops method=trace parent=mayo:80\n")                                 /* L15 */
-  T("dest_host=pluto scheme=HTTP parent=strategy:80\n")                                /* L16 */
+  T("dest_ip=" TEST_IP4_ADDR " parent=cat:37,dog:24 round_robin=strict\n")     /* L1 */
+  T("dest_ip=" TEST_IP6_ADDR " parent=zwoop:37,jMCg:24 round_robin=strict\n")  /* L1 */
+  T("dest_host=www.pilot.net parent=pilot_net:80\n")                           /* L2 */
+  T("url_regex=snoopy parent=odie:80,garfield:80 round_robin=true\n")          /* L3 */
+  T("dest_domain=i.am parent=amy:80,katie:80,carissa:771 round_robin=false\n") /* L4 */
+  T("dest_domain=microsoft.net time=03:00-22:10 parent=zoo.net:341\n")         /* L5 */
+  T("dest_domain=microsoft.net time=0:00-02:59 parent=zoo.net:347\n")          /* L6 */
+  T("dest_domain=microsoft.net time=22:11-23:59 parent=zoo.edu:111\n")         /* L7 */
+  T("dest_domain=imac.net port=819 parent=genie:80 round_robin=strict\n")      /* L8
+                                                                                  */
+  T("dest_ip=172.34.61.211 port=3142 parent=orangina:80 go_direct=false\n")    /* L9
+                                                                                  */
+  T("url_regex=miffy prefix=furry/rabbit parent=nintje:80 go_direct=false\n")  /* L10 */
+  T("url_regex=kitty suffix=tif parent=hello:80 round_robin=strict "
+    "go_direct=false\n")                                 /* L11 */
+  T("url_regex=cyclops method=get parent=turkey:80\n")   /* L12 */
+  T("url_regex=cyclops method=post parent=club:80\n")    /* L13 */
+  T("url_regex=cyclops method=put parent=sandwich:80\n") /* L14 */
+  T("url_regex=cyclops method=trace parent=mayo:80\n")   /* L15 */
+  T("dest_host=pluto scheme=HTTP parent=strategy:80\n")  /* L16 */
   REBUILD
   // Test 3
   IpEndpoint ip;
@@ -1245,7 +1247,8 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */,
     //   FP RE(verify(result,PARENT_SPECIFIED,"genie",80),8)
     // Test 7 - N Parent Table
     tbl[0] = '\0';
-  T("dest_domain=rabbit.net parent=fuzzy:80,fluffy:80,furry:80,frisky:80 round_robin=strict go_direct=true\n")
+  T("dest_domain=rabbit.net parent=fuzzy:80,fluffy:80,furry:80,frisky:80 "
+    "round_robin=strict go_direct=true\n")
   REBUILD
   // Test 8
   ST(8) REINIT br(request, "i.am.rabbit.net");
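
A rough sketch of the parent.config rule shape these tests exercise (inferred only from the T() lines above, not a complete grammar): each rule pairs one primary specifier with a parent list and optional modifiers, for example

  dest_domain=rabbit.net parent=fuzzy:80,fluffy:80,furry:80,frisky:80 round_robin=strict go_direct=true

where dest_domain / dest_host / dest_ip / url_regex selects the request, parent names the host:port candidates, and round_robin, go_direct, port, time, scheme, method, prefix and suffix narrow or adjust the match.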

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Plugin.cc
----------------------------------------------------------------------
diff --git a/proxy/Plugin.cc b/proxy/Plugin.cc
index 6927fa1..0572193 100644
--- a/proxy/Plugin.cc
+++ b/proxy/Plugin.cc
@@ -57,7 +57,8 @@ PluginRegInfo::PluginRegInfo()
 
 PluginRegInfo::~PluginRegInfo()
 {
-  // We don't support unloading plugins once they are successfully loaded, so assert
+  // We don't support unloading plugins once they are successfully loaded, so
+  // assert
   // that we don't accidentally attempt this.
   ink_release_assert(this->plugin_registered == false);
   ink_release_assert(this->link.prev == NULL);
@@ -91,7 +92,8 @@ plugin_load(int argc, char *argv[], bool validateOnly)
     }
   }
 
-  // elevate the access to read files as root if compiled with capabilities, if not
+  // elevate the access to read files as root if compiled with capabilities, if
+  // not
   // change the effective user to root
   {
 #if TS_USE_POSIX_CAP
@@ -200,7 +202,6 @@ plugin_expand(char *arg)
     break;
   }
 
-
 not_found:
   Warning("plugin.config: unable to find parameter %s", arg);
   return NULL;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/PluginVC.cc
----------------------------------------------------------------------
diff --git a/proxy/PluginVC.cc b/proxy/PluginVC.cc
index 66f2d1b..318a08c 100644
--- a/proxy/PluginVC.cc
+++ b/proxy/PluginVC.cc
@@ -53,7 +53,8 @@
    PluginVC, there a two locks. The one we got from our PluginVCCore and
    the lock from the state machine using the PluginVC.  The read side
    lock & the write side lock must be the same.  The regular net processor has
-   this constraint as well.  In order to handle scheduling of retry events cleanly,
+   this constraint as well.  In order to handle scheduling of retry events
+ cleanly,
    we have two event pointers, one for each lock.  sm_lock_retry_event can only
    be changed while holding the using state machine's lock and
    core_lock_retry_event can only be manipulated while holding the PluginVC's
@@ -492,7 +493,6 @@ PluginVC::process_write_side(bool other_side_call)
   Debug("pvc", "[%u] %s: process_write_side", core_obj->id, PVC_TYPE);
   need_write_process = false;
 
-
   // Check the state of our write buffer as well as ntodo
   int64_t ntodo = write_state.vio.ntodo();
   if (ntodo == 0) {
@@ -559,7 +559,6 @@ PluginVC::process_write_side(bool other_side_call)
   }
 }
 
-
 // void PluginVC::process_read_side()
 //
 //   This function may only be called while holding
@@ -727,7 +726,8 @@ PluginVC::process_close()
   core_obj->attempt_delete();
 }
 
-// void PluginVC::process_timeout(Event** e, int event_to_send, Event** our_eptr)
+// void PluginVC::process_timeout(Event** e, int event_to_send, Event**
+// our_eptr)
 //
 //   Handles sending timeout event to the VConnection.  e is the event we got
 //     which indicates the timeout.  event_to_send is the event to the
@@ -1127,7 +1127,6 @@ PluginVCCore::state_send_accept(int /* event ATS_UNUSED */, void * /* data ATS_U
   return 0;
 }
 
-
 // void PluginVCCore::attempt_delete()
 //
 //  Mutex must be held when calling this function

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Prefetch.cc
----------------------------------------------------------------------
diff --git a/proxy/Prefetch.cc b/proxy/Prefetch.cc
index 521aa83..02cef58 100644
--- a/proxy/Prefetch.cc
+++ b/proxy/Prefetch.cc
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #include "Prefetch.h"
 #include "HdrUtils.h"
 #include "HttpCompat.h"
@@ -51,20 +50,20 @@ struct html_tag prefetch_allowable_html_tags[] = {
 
 // this attribute table is hard coded. It has to be the same size as
 // the prefetch_allowable_html_tags table
-struct html_tag prefetch_allowable_html_attrs[] = {
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {NULL, NULL},
-  {"rel", "stylesheet"}, // We want to prefetch the .css files that are common; make sure this matches {"link", "href"}
-  {NULL, NULL}};
+struct html_tag prefetch_allowable_html_attrs[] = {{NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {NULL, NULL},
+                                                   {"rel", "stylesheet"}, // We want to prefetch the .css files that are
+                                                                          // common; make sure this matches {"link", "href"}
+                                                   {NULL, NULL}};
 
 static const char *PREFETCH_FIELD_RECURSION;
 static int PREFETCH_FIELD_LEN_RECURSION;
@@ -529,15 +528,19 @@ PrefetchTransform::redirect(HTTPHdr *resp)
   char *req_url = NULL;
   char *redirect_url = NULL;
 
-  /* Check for responses validity. If the response is valid, determine the status of the response.
+  /* Check for responses validity. If the response is valid, determine the
+     status of the response.
      We need to find out if there was a redirection (301, 302, 303, 307).
    */
   if ((resp != NULL) && (resp->valid())) {
     response_status = resp->status_get();
 
-    /* OK, so we got the response. Now if the response is a redirect we have to check if we also
-       got a Location: header. This indicates the new location where our object is located.
-       If refirect_url was not found, letz falter back to just a recursion. Since
+    /* OK, so we got the response. Now if the response is a redirect we have to
+       check if we also
+       got a Location: header. This indicates the new location where our object
+       is located.
+       If redirect_url was not found, let's fall back to just a recursion,
+       since
        we might find the url in the body.
      */
     if (resp->presence(MIME_PRESENCE_LOCATION)) {
@@ -620,7 +623,6 @@ PrefetchTransform::hash_add(char *s)
   if (normalize_url(s, &str_len) > 0)
     Debug("PrefetchParserURLs", "Normalized URL: %s\n", s);
 
-
   INK_MD5 hash;
   MD5Context().hash_immediate(hash, s, str_len);
   index = hash.slice32(1) % HASH_TABLE_LENGTH;
@@ -636,7 +638,6 @@ PrefetchTransform::hash_add(char *s)
   return *e;
 }
 
-
 #define IS_RECURSIVE_PREFETCH(req_ip) (prefetch_config->max_recursion > 0 && ats_is_ip_loopback(&req_ip))
 
 static void
@@ -731,7 +732,6 @@ check_n_attach_prefetch_transform(HttpSM *sm, HTTPHdr *resp, bool from_cache)
   }
 }
 
-
 static int
 PrefetchPlugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata)
 {
@@ -775,7 +775,8 @@ PrefetchPlugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata)
 
   TSHttpTxnReenable(reinterpret_cast<TSHttpTxn>(sm), TS_EVENT_HTTP_CONTINUE);
 
-  // Debug("PrefetchPlugin", "Returning after check_n_attach_prefetch_transform()\n");
+  // Debug("PrefetchPlugin", "Returning after
+  // check_n_attach_prefetch_transform()\n");
 
   return 0;
 }
@@ -1428,10 +1429,10 @@ Lcheckcookie:
     request->field_delete(MIME_FIELD_COOKIE, MIME_LEN_COOKIE);
   }
 
-  DUMP_HEADER("PrefetchCookies", req_hdr, (int64_t)0,
-              "Request Header for the top page used as the base for the new request with Cookies");
-  DUMP_HEADER("PrefetchCookies", resp_hdr, (int64_t)0,
-              "Response Header for the top page used as the base for the new request with Cookies");
+  DUMP_HEADER("PrefetchCookies", req_hdr, (int64_t)0, "Request Header for the top page used as the base for the new "
+                                                      "request with Cookies");
+  DUMP_HEADER("PrefetchCookies", resp_hdr, (int64_t)0, "Response Header for the top page used as the base for the new "
+                                                       "request with Cookies");
   DUMP_HEADER("PrefetchCookies", request, (int64_t)0, "Request Header with Cookies generated by Prefetch Parser");
 }
 
@@ -1844,7 +1845,8 @@ config_read_proto(TSPrefetchBlastData &blast, const char *str)
   else { // this is a multicast address:
     if (strncasecmp("multicast:", str, 10) == 0) {
       if (0 != ats_ip_pton(str, ats_ip_sa_cast(&blast.ip))) {
-        Error("PrefetchProcessor: Address specified for multicast does not seem to "
+        Error("PrefetchProcessor: Address specified for multicast does not "
+              "seem to "
               "be of the form multicast:ip_addr (eg: multicast:224.0.0.1)");
         return 1;
       } else {
@@ -1904,7 +1906,8 @@ PrefetchConfiguration::readConfiguration()
 
   conf_path = RecConfigReadConfigPath("proxy.config.prefetch.config_file");
   if (!conf_path) {
-    Warning("PrefetchProcessor: No prefetch configuration file specified. Prefetch disabled\n");
+    Warning("PrefetchProcessor: No prefetch configuration file specified. "
+            "Prefetch disabled\n");
     goto Lerror;
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Prefetch.h
----------------------------------------------------------------------
diff --git a/proxy/Prefetch.h b/proxy/Prefetch.h
index 5ca323b..fed5093 100644
--- a/proxy/Prefetch.h
+++ b/proxy/Prefetch.h
@@ -409,7 +409,6 @@ public:
   IOBufferReader *reader;
 };
 
-
 #define PREFETCH_CONFIG_UPDATE_TIMEOUT (HRTIME_SECOND * 60)
 
 #endif // PREFETCH

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ProtoSM.h
----------------------------------------------------------------------
diff --git a/proxy/ProtoSM.h b/proxy/ProtoSM.h
index 848e37e..3e32c1b 100644
--- a/proxy/ProtoSM.h
+++ b/proxy/ProtoSM.h
@@ -142,7 +142,6 @@ ProtoVCTable<VCTentry, max_entries>::cleanup_all()
   }
 }
 
-
 template <class VCTentry, int max_entries>
 inline bool
 ProtoVCTable<VCTentry, max_entries>::is_table_clear()

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ProtocolProbeSessionAccept.cc
----------------------------------------------------------------------
diff --git a/proxy/ProtocolProbeSessionAccept.cc b/proxy/ProtocolProbeSessionAccept.cc
index f512aa1..a15421a 100644
--- a/proxy/ProtocolProbeSessionAccept.cc
+++ b/proxy/ProtocolProbeSessionAccept.cc
@@ -30,7 +30,8 @@
 static bool
 proto_is_spdy(IOBufferReader *reader)
 {
-  // SPDY clients have to start by sending a control frame (the high bit is set). Let's assume
+  // SPDY clients have to start by sending a control frame (the high bit is
+  // set). Let's assume
   // that no other protocol could possibly ever set this bit!
   return ((uint8_t)(*reader)[0]) == 0x80u;
 }
@@ -63,7 +64,8 @@ struct ProtocolProbeTrampoline : public Continuation, public ProtocolProbeSessio
     : Continuation(mutex), probeParent(probe)
   {
     this->iobuf = new_MIOBuffer(buffer_size_index);
-    reader = iobuf->alloc_reader(); // reader must be allocated only on a new MIOBuffer.
+    reader = iobuf->alloc_reader(); // reader must be allocated only on a new
+                                    // MIOBuffer.
     SET_HANDLER(&ProtocolProbeTrampoline::ioCompletionEvent);
   }
 
@@ -100,7 +102,8 @@ struct ProtocolProbeTrampoline : public Continuation, public ProtocolProbeSessio
       goto done;
     }
 
-    // SPDY clients have to start by sending a control frame (the high bit is set). Let's assume
+    // SPDY clients have to start by sending a control frame (the high bit is
+    // set). Let's assume
     // that no other protocol could possibly ever set this bit!
     if (proto_is_spdy(reader)) {
       key = PROTO_SPDY;
@@ -118,7 +121,8 @@ struct ProtocolProbeTrampoline : public Continuation, public ProtocolProbeSessio
       goto done;
     }
 
-    // Directly invoke the session acceptor, letting it take ownership of the input buffer.
+    // Directly invoke the session acceptor, letting it take ownership of the
+    // input buffer.
     probeParent->endpoint[key]->accept(netvc, this->iobuf, reader);
     delete this;
     return EVENT_CONT;
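
The probe above needs only the first buffered octet to separate SPDY from plain HTTP. A minimal standalone sketch of that check (looks_like_spdy is a hypothetical helper, not an ATS API):

  #include <stdint.h>

  // SPDY clients must start with a control frame, whose first octet has the
  // high bit set; the probe assumes no other supported protocol ever does that.
  static bool
  looks_like_spdy(const unsigned char *buf, int64_t avail)
  {
    return avail > 0 && (buf[0] & 0x80u) != 0;
  }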

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ProtocolProbeSessionAccept.h
----------------------------------------------------------------------
diff --git a/proxy/ProtocolProbeSessionAccept.h b/proxy/ProtocolProbeSessionAccept.h
index 9b9e54c..b2fe18d 100644
--- a/proxy/ProtocolProbeSessionAccept.h
+++ b/proxy/ProtocolProbeSessionAccept.h
@@ -59,7 +59,8 @@ private:
 
   /** Child acceptors, index by @c ProtoGroupKey
 
-      We pass on the actual accept to one of these after doing protocol sniffing.
+      We pass on the actual accept to one of these after doing protocol
+     sniffing.
       We make it one larger and leave the last entry NULL so we don't have to
       do range checks on the enum value.
    */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ProxyClientSession.h
----------------------------------------------------------------------
diff --git a/proxy/ProxyClientSession.h b/proxy/ProxyClientSession.h
index 4ed616c..aa4177c 100644
--- a/proxy/ProxyClientSession.h
+++ b/proxy/ProxyClientSession.h
@@ -29,7 +29,8 @@
 #include "InkAPIInternal.h"
 
 // Emit a debug message conditional on whether this particular client session
-// has debugging enabled. This should only be called from within a client session
+// has debugging enabled. This should only be called from within a client
+// session
 // member function.
 #define DebugSsn(ssn, tag, ...) DebugSpecific((ssn)->debug(), tag, __VA_ARGS__)
 
@@ -103,7 +104,8 @@ public:
   static int64_t next_connection_id();
 
 protected:
-  // XXX Consider using a bitwise flags variable for the following flags, so that we can make the best
+  // XXX Consider using a bitwise flags variable for the following flags, so
+  // that we can make the best
   // use of internal alignment padding.
 
   // Session specific debug flag.
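
The DebugSsn() macro above is meant to be called from inside a client session member function, where the session pointer is available. A minimal hypothetical use (tag and message are made up):

  DebugSsn(this, "proxy_ssn", "accepted new client session");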

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ReverseProxy.h
----------------------------------------------------------------------
diff --git a/proxy/ReverseProxy.h b/proxy/ReverseProxy.h
index 680401a..88b1d74 100644
--- a/proxy/ReverseProxy.h
+++ b/proxy/ReverseProxy.h
@@ -57,7 +57,8 @@ extern remap_plugin_info *remap_pi_list;
 int init_reverse_proxy();
 
 // Both Return true if a remapping was made and false otherwise
-// ebalsa@ Y! -- this happens in the remapProcessor now for the reverse proxy case (not CDN or BlindTunnel)
+// ebalsa@ Y! -- this happens in the remapProcessor now for the reverse proxy
+// case (not CDN or BlindTunnel)
 bool request_url_remap(HttpTransact::State *s, HTTPHdr *request_header, char **redirect_url,
                        unsigned int filter_mask = URL_REMAP_FILTER_NONE);
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Show.h
----------------------------------------------------------------------
diff --git a/proxy/Show.h b/proxy/Show.h
index e6d8498..52481c9 100644
--- a/proxy/Show.h
+++ b/proxy/Show.h
@@ -151,5 +151,4 @@ public:
   }
 };
 
-
 #endif


[3/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/StatSystem.cc
----------------------------------------------------------------------
diff --git a/proxy/StatSystem.cc b/proxy/StatSystem.cc
index 391cdd8..fa4d102 100644
--- a/proxy/StatSystem.cc
+++ b/proxy/StatSystem.cc
@@ -40,7 +40,6 @@
 
 #define SNAP_USAGE_PERIOD HRTIME_SECONDS(2)
 
-
 // variables
 
 #ifdef DEBUG
@@ -65,7 +64,6 @@ int snap_stats_every = 60;
 ink_hrtime http_handler_times[MAX_HTTP_HANDLER_EVENTS];
 int http_handler_counts[MAX_HTTP_HANDLER_EVENTS];
 
-
 char snap_filename[PATH_NAME_MAX] = DEFAULT_SNAP_FILENAME;
 
 #define DEFAULT_PERSISTENT
@@ -117,7 +115,6 @@ static int non_persistent_stats[] = {
 #undef _FOOTER
 #undef _D
 
-
 // functions
 
 static int
@@ -372,7 +369,6 @@ stat_callback(Continuation *cont, HTTPHdr *header)
     snprintf(result, result_size - 7, "<pre>\n%s", buffer);
   }
 
-
   if (!empty) {
     StatPageData data;
 
@@ -425,7 +421,8 @@ initialize_all_global_stats()
 
   if (access(rundir, R_OK | W_OK) == -1) {
     Warning("Unable to access() local state directory '%s': %d, %s", (const char *)rundir, errno, strerror(errno));
-    Warning(" Please set 'proxy.config.local_state_dir' to allow statistics collection");
+    Warning(" Please set 'proxy.config.local_state_dir' to allow statistics "
+            "collection");
   }
   REC_ReadConfigString(snap_file, "proxy.config.stats.snap_file", PATH_NAME_MAX);
   Layout::relative_to(snap_filename, sizeof(snap_filename), (const char *)rundir, snap_file);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/TestClusterHash.cc
----------------------------------------------------------------------
diff --git a/proxy/TestClusterHash.cc b/proxy/TestClusterHash.cc
index 73ab208..3663a4d 100644
--- a/proxy/TestClusterHash.cc
+++ b/proxy/TestClusterHash.cc
@@ -28,7 +28,6 @@
 #include "Cluster.h"
 #include "libts.h"
 
-
 //
 // This test function produces the table included
 // in Memo.ClusterHash

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/TestPreProc.cc
----------------------------------------------------------------------
diff --git a/proxy/TestPreProc.cc b/proxy/TestPreProc.cc
index 68fef0d..562721c 100644
--- a/proxy/TestPreProc.cc
+++ b/proxy/TestPreProc.cc
@@ -70,7 +70,6 @@ RequestInput::run()
   char *buff = m_cb->getWrite(&maxBytes);
   unsigned writeBytes = (m_len < maxBytes) ? m_len : maxBytes;
 
-
   writeBytes = ink_strlcpy(buff, m_sp, maxBytes);
   m_cb->wrote(writeBytes);
 
@@ -177,6 +176,5 @@ main()
     cout << "Elapsed time for " << lc << "loops is " << elapsedTime << endl;
   }
 
-
   return (0);
 }

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/TestProxy.cc
----------------------------------------------------------------------
diff --git a/proxy/TestProxy.cc b/proxy/TestProxy.cc
index 5d807a0..fcc061f 100644
--- a/proxy/TestProxy.cc
+++ b/proxy/TestProxy.cc
@@ -30,7 +30,6 @@
 #include "OneWayMultiTunnel.h"
 #include "Cache.h"
 
-
 struct TestProxy : Continuation {
   VConnection *vc;
   VConnection *vconnection_vector[2];

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/TestSimpleProxy.cc
----------------------------------------------------------------------
diff --git a/proxy/TestSimpleProxy.cc b/proxy/TestSimpleProxy.cc
index 6cc70f1..412df16 100644
--- a/proxy/TestSimpleProxy.cc
+++ b/proxy/TestSimpleProxy.cc
@@ -137,7 +137,6 @@ struct TestProxy : Continuation {
   }
 };
 
-
 struct TestAccept : Continuation {
   int
   startEvent(int event, NetVConnection *e)

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/TimeTrace.h
----------------------------------------------------------------------
diff --git a/proxy/TimeTrace.h b/proxy/TimeTrace.h
index 05c1a01..e6445c2 100644
--- a/proxy/TimeTrace.h
+++ b/proxy/TimeTrace.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 /****************************************************************************
 
   TimeTrace.h

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/Transform.h
----------------------------------------------------------------------
diff --git a/proxy/Transform.h b/proxy/Transform.h
index 8585fa0..af0dca3 100644
--- a/proxy/Transform.h
+++ b/proxy/Transform.h
@@ -108,5 +108,4 @@ num_chars_for_int(int64_t i)
 
 extern TransformProcessor transformProcessor;
 
-
 #endif /* __TRANSFORM_H__ */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/UDPAPIClientTest.cc
----------------------------------------------------------------------
diff --git a/proxy/UDPAPIClientTest.cc b/proxy/UDPAPIClientTest.cc
index 88e74a2..ff41f21 100644
--- a/proxy/UDPAPIClientTest.cc
+++ b/proxy/UDPAPIClientTest.cc
@@ -28,7 +28,6 @@
 #include <string.h>
 #include <arpa/inet.h>
 
-
 char sendBuff[] = "I'm Alive.";
 
 FILE *fp;
@@ -90,7 +89,6 @@ UDPClient_handle_callbacks(TSCont cont, TSEvent event, void *e)
         for (int i = 0; i < avail; i++)
           fprintf(fp, "%c", *(buf + i));
 
-
         memcpy((char *)&recvBuff + total_len, buf, avail);
         TSIOBufferReaderConsume(reader, avail);
         total_len += avail;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/api/ts/InkAPIPrivateIOCore.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/InkAPIPrivateIOCore.h b/proxy/api/ts/InkAPIPrivateIOCore.h
index 9b7371a..2eea5ee 100644
--- a/proxy/api/ts/InkAPIPrivateIOCore.h
+++ b/proxy/api/ts/InkAPIPrivateIOCore.h
@@ -128,7 +128,6 @@ TSReturnCode sdk_sanity_check_iocore_structure(void *);
 tsapi TSMutex TSMutexCreateInternal(void);
 tsapi int TSMutexCheck(TSMutex mutex);
 
-
 /* IOBuffer */
 tsapi void TSIOBufferReaderCopy(TSIOBufferReader readerp, const void *buf, int64_t length);
 tsapi int64_t TSIOBufferBlockDataSizeGet(TSIOBufferBlock blockp);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/api/ts/remap.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/remap.h b/proxy/api/ts/remap.h
index c6f3cab..34705cb 100644
--- a/proxy/api/ts/remap.h
+++ b/proxy/api/ts/remap.h
@@ -38,22 +38,26 @@ extern "C" {
 
 typedef struct _tsremap_api_info {
   unsigned long size;            /* in: sizeof(struct _tsremap_api_info) */
-  unsigned long tsremap_version; /* in: TS supported version ((major << 16) | minor) */
+  unsigned long tsremap_version; /* in: TS supported version ((major << 16) |
+                                    minor) */
 } TSRemapInterface;
 
-
 typedef struct _tm_remap_request_info {
-  /* Important: You should *not* release these buf pointers or TSMLocs from your plugin! */
+  /* Important: You should *not* release these buf pointers or TSMLocs from your
+   * plugin! */
 
   /* these URL mloc's are read only, use normal ts/ts.h APIs for accesing  */
   TSMLoc mapFromUrl;
   TSMLoc mapToUrl;
 
-  /* the request URL mloc and buffer pointers are read-write. You can read and modify the
-   requestUrl using normal ts/ts.h APIs, which is how you change the destination URL. */
+  /* the request URL mloc and buffer pointers are read-write. You can read and
+   modify the
+   requestUrl using normal ts/ts.h APIs, which is how you change the destination
+   URL. */
   TSMLoc requestUrl;
 
-  /* requestBufp and requestHdrp are the equivalent of calling TSHttpTxnClientReqGet(). */
+  /* requestBufp and requestHdrp are the equivalent of calling
+   * TSHttpTxnClientReqGet(). */
   TSMBuffer requestBufp;
   TSMLoc requestHdrp;
 
@@ -61,7 +65,6 @@ typedef struct _tm_remap_request_info {
   int redirect;
 } TSRemapRequestInfo;
 
-
 /* This is the type returned by the TSRemapDoRemap() callback */
 typedef enum {
   TSREMAP_NO_REMAP = 0,       /* No remaping was done, continue with next in chain */
@@ -74,13 +77,14 @@ typedef enum {
      -500 to -599
      ....
      This would allow a plugin to generate an error page. Right now,
-     setting the return code to any negative number is equivalent to TSREMAP_NO_REMAP */
+     setting the return code to any negative number is equivalent to
+     TSREMAP_NO_REMAP */
   TSREMAP_ERROR = -1 /* Some error, that should generate an error page */
 } TSRemapStatus;
 
-
 /* ----------------------------------------------------------------------------------
-   These are the entry points a plugin can implement. Note that TSRemapInit() and
+   These are the entry points a plugin can implement. Note that TSRemapInit()
+   and
    TSRemapDoRemap() are both required.
    ----------------------------------------------------------------------------------
 */
@@ -92,33 +96,33 @@ typedef enum {
 */
 tsapi TSReturnCode TSRemapInit(TSRemapInterface *api_info, char *errbuf, int errbuf_size);
 
-
 /* Remap new request
    Mandatory interface function.
    Remap API plugin can/should use SDK API function calls inside this function!
    return: TSREMAP_NO_REMAP - No remaping was done, continue with next in chain
            TSREMAP_DID_REMAP - Remapping was done, continue with next in chain
-           TSREMAP_NO_REMAP_STOP - No remapping was done, and stop plugin chain evaluation
-           TSREMAP_DID_REMAP_STOP -  Remapping was done, but stop plugin chain evaluation
+           TSREMAP_NO_REMAP_STOP - No remapping was done, and stop plugin chain
+   evaluation
+           TSREMAP_DID_REMAP_STOP -  Remapping was done, but stop plugin chain
+   evaluation
 */
 tsapi TSRemapStatus TSRemapDoRemap(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri);
 
-
 /* Plugin shutdown, called when plugin is unloaded.
    Optional function. */
 tsapi void TSRemapDone(void);
 
-
-/* Plugin new instance. Create new plugin processing entry for unique remap record.
+/* Plugin new instance. Create new plugin processing entry for unique remap
+   record.
    First two arguments in argv vector are - fromURL and toURL from remap record.
-   Please keep in mind that fromURL and toURL will be converted to canonical view.
+   Please keep in mind that fromURL and toURL will be converted to canonical
+   view.
    Return: TS_SUCESS
            TS_ERROR - instance creation error
 */
 tsapi TSReturnCode TSRemapNewInstance(int argc, char *argv[], void **ih, char *errbuf, int errbuf_size);
 tsapi void TSRemapDeleteInstance(void *);
 
-
 /* Check response code from Origin Server
    os_response_type -> TSServerState
    Remap API plugin can use InkAPI function calls inside TSRemapDoRemap()
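
The two mandatory entry points documented above are enough for a do-nothing remap plugin. A minimal sketch, assuming the usual plugin include paths (ts/ts.h and ts/remap.h) and no per-rule instance state:

  #include <ts/ts.h>
  #include <ts/remap.h>

  TSReturnCode
  TSRemapInit(TSRemapInterface *api_info, char *errbuf, int errbuf_size)
  {
    (void)api_info;
    (void)errbuf;
    (void)errbuf_size;
    return TS_SUCCESS; // nothing to set up
  }

  TSRemapStatus
  TSRemapDoRemap(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri)
  {
    (void)ih;
    (void)rh;
    (void)rri;
    return TSREMAP_NO_REMAP; // leave the mapping unchanged, continue the chain
  }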

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/api/ts/ts.h
----------------------------------------------------------------------
diff --git a/proxy/api/ts/ts.h b/proxy/api/ts/ts.h
index 779f99c..6453c41 100644
--- a/proxy/api/ts/ts.h
+++ b/proxy/api/ts/ts.h
@@ -729,7 +729,6 @@ TSUrlPercentEncode(TSMBuffer bufp, TSMLoc offset, char *dst, size_t dst_size, si
 */
 tsapi TSReturnCode TSStringPercentDecode(const char *str, size_t str_len, char *dst, size_t dst_size, size_t *length);
 
-
 /* --------------------------------------------------------------------------
    MIME headers */
 
@@ -1006,7 +1005,8 @@ tsapi TSReturnCode TSMimeHdrFieldValueUintSet(TSMBuffer bufp, TSMLoc hdr, TSMLoc
 tsapi TSReturnCode TSMimeHdrFieldValueDateSet(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, time_t value);
 
 tsapi TSReturnCode TSMimeHdrFieldValueAppend(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, const char *value, int length);
-/* These Insert() APIs should be considered. Use the corresponding Set() API instead */
+/* These Insert() APIs should be considered. Use the corresponding Set() API
+ * instead */
 tsapi TSReturnCode
 TSMimeHdrFieldValueStringInsert(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, const char *value, int length);
 tsapi TSReturnCode TSMimeHdrFieldValueIntInsert(TSMBuffer bufp, TSMLoc hdr, TSMLoc field, int idx, int value);
@@ -1338,7 +1338,8 @@ tsapi struct sockaddr const *TSHttpTxnServerAddrGet(TSHttpTxn txnp);
     This must be invoked before the origin server address is looked up.
     If called no lookup is done, the address @a addr is used instead.
 
-    @return @c TS_SUCCESS if the origin server address is set, @c TS_ERROR otherwise.
+    @return @c TS_SUCCESS if the origin server address is set, @c TS_ERROR
+   otherwise.
 */
 tsapi TSReturnCode TSHttpTxnServerAddrSet(TSHttpTxn txnp, struct sockaddr const *addr /**< Address for origin server. */
                                           );
@@ -1496,8 +1497,10 @@ tsapi void *TSHttpTxnArgGet(TSHttpTxn txnp, int arg_idx);
 tsapi void TSHttpSsnArgSet(TSHttpSsn ssnp, int arg_idx, void *arg);
 tsapi void *TSHttpSsnArgGet(TSHttpSsn ssnp, int arg_idx);
 
-/* The reserve API should only be use in TSAPI plugins, during plugin initialization! */
-/* The lookup methods can be used anytime, but are best used during initialization as well,
+/* The reserve API should only be use in TSAPI plugins, during plugin
+ * initialization! */
+/* The lookup methods can be used anytime, but are best used during
+   initialization as well,
    or at least "cache" the results for best performance. */
 tsapi TSReturnCode TSHttpArgIndexReserve(const char *name, const char *description, int *arg_idx);
 tsapi TSReturnCode TSHttpArgIndexNameLookup(const char *name, int *arg_idx, const char **description);
@@ -1534,7 +1537,8 @@ tsapi void TSHttpTxnDebugSet(TSHttpTxn txnp, int on);
 tsapi int TSHttpTxnDebugGet(TSHttpTxn txnp);
 /**
        Set the session specific debugging flag for this client session.
-       When turned on, internal debug messages related to this session and all transactions
+       When turned on, internal debug messages related to this session and all
+   transactions
        in the session will be written even if the debug tag isn't on.
 
     @param ssnp Client session to change.
@@ -1624,7 +1628,8 @@ tsapi void TSHttpTxnServerIntercept(TSCont contp, TSHttpTxn txnp);
     This returns a VConn that connected to the transaction.
 
     @param addr Target address of the origin server.
-    @param tag A logging tag that can be accessed via the pitag field. May be @c NULL.
+    @param tag A logging tag that can be accessed via the pitag field. May be @c
+   NULL.
     @param id A logging id that can be access via the piid field.
  */
 tsapi TSVConn TSHttpConnectWithPluginId(struct sockaddr const *addr, char const *tag, int64_t id);
@@ -1722,10 +1727,10 @@ tsapi struct sockaddr const *TSNetVConnRemoteAddrGet(TSVConn vc);
       or cancel the attempt to connect.
 
  */
-tsapi TSAction
-TSNetConnect(TSCont contp, /**< continuation that is called back when the attempted net connection either succeeds or fails. */
-             struct sockaddr const *to /**< Address to which to connect. */
-             );
+tsapi TSAction TSNetConnect(TSCont contp,             /**< continuation that is called back when the attempted net
+                                                         connection either succeeds or fails. */
+                            struct sockaddr const *to /**< Address to which to connect. */
+                            );
 
 tsapi TSAction TSNetAccept(TSCont contp, int port, int domain, int accept_threads);
 
@@ -1947,8 +1952,10 @@ tsapi int64_t TSIOBufferReaderAvail(TSIOBufferReader readerp);
 tsapi struct sockaddr const *TSNetVConnLocalAddrGet(TSVConn vc);
 
 /* --------------------------------------------------------------------------
-   Stats and configs based on librecords raw stats (this is preferred API until we
-   rewrite stats). This system has a limitation of up to 1,500 stats max, controlled
+   Stats and configs based on librecords raw stats (this is preferred API until
+   we
+   rewrite stats). This system has a limitation of up to 1,500 stats max,
+   controlled
    via proxy.config.stat_api.max_stats_allowed (default is 512).
 
    This is available as of Apache TS v2.2.*/
@@ -1997,7 +2004,8 @@ tsapi void TSDebug(const char *tag, const char *format_str, ...) TS_PRINTFLIKE(2
     Output a debug line even if the debug tag is turned off, as long as
     debugging is enabled. Could be used as follows:
     @code
-    TSDebugSpecifc(TSHttpTxnDebugGet(txn), "plugin_tag" , "Hello World from transaction %p", txn);
+    TSDebugSpecific(TSHttpTxnDebugGet(txn), "plugin_tag", "Hello World from
+   transaction %p", txn);
     @endcode
     will be printed if the plugin_tag is enabled or the transaction specific
     debugging is turned on for txn.
@@ -2157,14 +2165,16 @@ tsapi TSReturnCode TSTextLogObjectRollingEnabledSet(TSTextLogObject the_object,
 tsapi void TSTextLogObjectRollingIntervalSecSet(TSTextLogObject the_object, int rolling_interval_sec);
 
 /**
-    Set the rolling offset. rolling_offset_hr specifies the hour (between 0 and 23) when log rolling
+    Set the rolling offset. rolling_offset_hr specifies the hour (between 0 and
+   23) when log rolling
     should take place.
 
  */
 tsapi void TSTextLogObjectRollingOffsetHrSet(TSTextLogObject the_object, int rolling_offset_hr);
 
 /**
-    Set the rolling size. rolling_size_mb specifies the size in MB when log rolling
+    Set the rolling size. rolling_size_mb specifies the size in MB when log
+   rolling
     should take place.
 
  */
@@ -2240,7 +2250,6 @@ tsapi void TSVConnActiveTimeoutCancel(TSVConn connp);
 */
 tsapi void TSSkipRemappingSet(TSHttpTxn txnp, int flag);
 
-
 /*
   Set or get various overridable configurations, for a transaction. This should
   probably be done as early as possible, e.g. TS_HTTP_READ_REQUEST_HDR_HOOK.
@@ -2284,7 +2293,8 @@ tsapi void TSHttpTxnRedirectUrlSet(TSHttpTxn txnp, const char *url, const int ur
 tsapi TS_DEPRECATED void TSRedirectUrlSet(TSHttpTxn txnp, const char *url, const int url_len);
 
 /**
-   Return the current (if set) redirection URL string. This is still owned by the
+   Return the current (if set) redirection URL string. This is still owned by
+   the
    core, and must not be free'd.
 
    @param txnp the transaction pointer
@@ -2327,10 +2337,13 @@ tsapi int TSHttpTxnBackgroundFillStarted(TSHttpTxn txnp);
 tsapi TSReturnCode TSBase64Decode(const char *str, size_t str_len, unsigned char *dst, size_t dst_size, size_t *length);
 tsapi TSReturnCode TSBase64Encode(const char *str, size_t str_len, char *dst, size_t dst_size, size_t *length);
 
-/* Get milestone timers, useful for measuring where we are spending time in the transaction processing */
+/* Get milestone timers, useful for measuring where we are spending time in the
+ * transaction processing */
 /**
-   Return the particular milestone timer for the transaction. If 0 is returned, it means
-   the transaction has not yet reached that milestone. Asking for an "unknown" milestone is
+   Return the particular milestone timer for the transaction. If 0 is returned,
+   it means
+   the transaction has not yet reached that milestone. Asking for an "unknown"
+   milestone is
    an error.
 
    @param txnp the transaction pointer
@@ -2344,20 +2357,25 @@ tsapi TSReturnCode TSBase64Encode(const char *str, size_t str_len, char *dst, si
 tsapi TSReturnCode TSHttpTxnMilestoneGet(TSHttpTxn txnp, TSMilestonesType milestone, TSHRTime *time);
 
 /**
-  Test whether a request / response header pair would be cacheable under the current
-  configuration. This would typically be used in TS_HTTP_READ_RESPONSE_HDR_HOOK, when
+  Test whether a request / response header pair would be cacheable under the
+  current
+  configuration. This would typically be used in TS_HTTP_READ_RESPONSE_HDR_HOOK,
+  when
   you have both the client request and server response ready.
 
   @param txnp the transaction pointer
-  @param request the client request header. If NULL, use the transactions client request.
-  @param response the server response header. If NULL, use the transactions origin response.
+  @param request the client request header. If NULL, use the transactions client
+  request.
+  @param response the server response header. If NULL, use the transactions
+  origin response.
 
   @return 1 if the request / response is cacheable, 0 otherwise
 */
 tsapi int TSHttpTxnIsCacheable(TSHttpTxn txnp, TSMBuffer request, TSMBuffer response);
 
 /**
-   Return a string respresentation for a TSServerState value. This is useful for plugin debugging.
+   Return a string representation for a TSServerState value. This is useful for
+   plugin debugging.
 
    @param state the value of this TSServerState
 
@@ -2366,7 +2384,8 @@ tsapi int TSHttpTxnIsCacheable(TSHttpTxn txnp, TSMBuffer request, TSMBuffer resp
 tsapi const char *TSHttpServerStateNameLookup(TSServerState state);
 
 /**
-   Return a string respresentation for a TSHttpHookID value. This is useful for plugin debugging.
+   Return a string representation for a TSHttpHookID value. This is useful for
+   plugin debugging.
 
    @param hook the value of this TSHttpHookID
 
@@ -2375,7 +2394,8 @@ tsapi const char *TSHttpServerStateNameLookup(TSServerState state);
 tsapi const char *TSHttpHookNameLookup(TSHttpHookID hook);
 
 /**
-   Return a string respresentation for a TSEvent value. This is useful for plugin debugging.
+   Return a string representation for a TSEvent value. This is useful for
+   plugin debugging.
 
    @param event the value of this TSHttpHookID
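
A hedged sketch of how the TSHttpTxnIsCacheable() documentation above would typically be used from a read-response hook; the handler name and debug tag are made up, and the continuation would be registered on TS_HTTP_READ_RESPONSE_HDR_HOOK:

  #include <ts/ts.h>

  static int
  cacheable_check(TSCont /* contp */, TSEvent event, void *edata)
  {
    TSHttpTxn txnp = static_cast<TSHttpTxn>(edata);
    if (event == TS_EVENT_HTTP_READ_RESPONSE_HDR) {
      // NULL request/response means "use the transaction's own headers",
      // per the documentation above.
      TSDebug("cacheable_check", "cacheable=%d", TSHttpTxnIsCacheable(txnp, NULL, NULL));
    }
    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
    return 0;
  }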
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/hdrs/HTTP.cc
----------------------------------------------------------------------
diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc
index 25f14d1..3de7430 100644
--- a/proxy/hdrs/HTTP.cc
+++ b/proxy/hdrs/HTTP.cc
@@ -29,6 +29,7 @@
 #include "HTTP.h"
 #include "HdrToken.h"
 #include "Diags.h"
+#include "I_IOBuffer.h"
 
 /***********************************************************************
  *                                                                     *
@@ -1782,81 +1783,66 @@ ClassAllocator<HTTPCacheAlt> httpCacheAltAllocator("httpCacheAltAllocator");
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 HTTPCacheAlt::HTTPCacheAlt()
-  : m_magic(CACHE_ALT_MAGIC_ALIVE), m_writeable(1), m_unmarshal_len(-1), m_id(-1), m_rid(-1), m_request_hdr(), m_response_hdr(),
-    m_request_sent_time(0), m_response_received_time(0), m_frag_offset_count(0), m_frag_offsets(0), m_ext_buffer(NULL)
+  : m_magic(CACHE_ALT_MAGIC_ALIVE), m_unmarshal_len(-1), m_id(-1), m_rid(-1), m_frag_count(0), m_request_hdr(), m_response_hdr(),
+    m_request_sent_time(0), m_response_received_time(0), m_fragments(0), m_ext_buffer(NULL)
 {
-  m_object_key[0] = 0;
-  m_object_key[1] = 0;
-  m_object_key[2] = 0;
-  m_object_key[3] = 0;
-  m_object_size[0] = 0;
-  m_object_size[1] = 0;
+  m_flags = 0;               // set all flags to false.
+  m_flag.writeable_p = true; // except this one.
 }
 
 void
 HTTPCacheAlt::destroy()
 {
   ink_assert(m_magic == CACHE_ALT_MAGIC_ALIVE);
-  ink_assert(m_writeable);
+  ink_assert(m_flag.writeable_p);
   m_magic = CACHE_ALT_MAGIC_DEAD;
-  m_writeable = 0;
+  m_flag.writeable_p = 0;
   m_request_hdr.destroy();
   m_response_hdr.destroy();
-  m_frag_offset_count = 0;
-  if (m_frag_offsets && m_frag_offsets != m_integral_frag_offsets) {
-    ats_free(m_frag_offsets);
-    m_frag_offsets = 0;
-  }
+  m_frag_count = 0;
+  if (m_flag.table_allocated_p)
+    ats_free(m_fragments);
+  m_fragments = 0;
   httpCacheAltAllocator.free(this);
 }
 
 void
-HTTPCacheAlt::copy(HTTPCacheAlt *to_copy)
+HTTPCacheAlt::copy(HTTPCacheAlt *that)
 {
-  m_magic = to_copy->m_magic;
-  // m_writeable =      to_copy->m_writeable;
-  m_unmarshal_len = to_copy->m_unmarshal_len;
-  m_id = to_copy->m_id;
-  m_rid = to_copy->m_rid;
-  m_object_key[0] = to_copy->m_object_key[0];
-  m_object_key[1] = to_copy->m_object_key[1];
-  m_object_key[2] = to_copy->m_object_key[2];
-  m_object_key[3] = to_copy->m_object_key[3];
-  m_object_size[0] = to_copy->m_object_size[0];
-  m_object_size[1] = to_copy->m_object_size[1];
+  m_magic = that->m_magic;
+  m_unmarshal_len = that->m_unmarshal_len;
+  m_id = that->m_id;
+  m_rid = that->m_rid;
+  m_earliest = that->m_earliest;
 
-  if (to_copy->m_request_hdr.valid()) {
-    m_request_hdr.copy(&to_copy->m_request_hdr);
+  if (that->m_request_hdr.valid()) {
+    m_request_hdr.copy(&that->m_request_hdr);
   }
 
-  if (to_copy->m_response_hdr.valid()) {
-    m_response_hdr.copy(&to_copy->m_response_hdr);
+  if (that->m_response_hdr.valid()) {
+    m_response_hdr.copy(&that->m_response_hdr);
   }
 
-  m_request_sent_time = to_copy->m_request_sent_time;
-  m_response_received_time = to_copy->m_response_received_time;
-  this->copy_frag_offsets_from(to_copy);
-}
+  m_request_sent_time = that->m_request_sent_time;
+  m_response_received_time = that->m_response_received_time;
+  m_fixed_fragment_size = that->m_fixed_fragment_size;
 
-void
-HTTPCacheAlt::copy_frag_offsets_from(HTTPCacheAlt *src)
-{
-  m_frag_offset_count = src->m_frag_offset_count;
-  if (m_frag_offset_count > 0) {
-    if (m_frag_offset_count > N_INTEGRAL_FRAG_OFFSETS) {
-      /* Mixed feelings about this - technically we don't need it to be a
-         power of two when copied because currently that means it is frozen.
-         But that could change later and it would be a nasty bug to find.
-         So we'll do it for now. The relative overhead is tiny.
-      */
-      int bcount = HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS * 2;
-      while (bcount < m_frag_offset_count)
-        bcount *= 2;
-      m_frag_offsets = static_cast<FragOffset *>(ats_malloc(sizeof(FragOffset) * bcount));
-    } else {
-      m_frag_offsets = m_integral_frag_offsets;
-    }
-    memcpy(m_frag_offsets, src->m_frag_offsets, sizeof(FragOffset) * m_frag_offset_count);
+  m_frag_count = that->m_frag_count;
+
+  if (m_flag.table_allocated_p)
+    ats_free(m_fragments);
+
+  // Safe to copy now, and we need to do that before we copy the fragment table.
+  m_flags = that->m_flags;
+
+  if (that->m_fragments) {
+    size_t size = FragmentDescriptorTable::calc_size(that->m_fragments->m_n);
+    m_fragments = static_cast<FragmentDescriptorTable *>(ats_malloc(size));
+    memcpy(m_fragments, that->m_fragments, size);
+    m_flag.table_allocated_p = true;
+  } else {
+    m_fragments = 0;
+    m_flag.table_allocated_p = false;
   }
 }
 
@@ -1871,7 +1857,7 @@ HTTPInfo::create()
 void
 HTTPInfo::copy(HTTPInfo *hi)
 {
-  if (m_alt && m_alt->m_writeable) {
+  if (m_alt && m_alt->m_flag.writeable_p) {
     destroy();
   }
 
@@ -1879,14 +1865,6 @@ HTTPInfo::copy(HTTPInfo *hi)
   m_alt->copy(hi->m_alt);
 }
 
-void
-HTTPInfo::copy_frag_offsets_from(HTTPInfo *src)
-{
-  if (m_alt && src->m_alt)
-    m_alt->copy_frag_offsets_from(src->m_alt);
-}
-
-
 int
 HTTPInfo::marshal_length()
 {
@@ -1900,10 +1878,8 @@ HTTPInfo::marshal_length()
     len += m_alt->m_response_hdr.m_heap->marshal_length();
   }
 
-  if (m_alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
-    len -= sizeof(m_alt->m_integral_frag_offsets);
-    len += sizeof(FragOffset) * m_alt->m_frag_offset_count;
-  }
+  if (m_alt->m_fragments)
+    len += FragmentDescriptorTable::calc_size(m_alt->m_fragments->m_n);
 
   return len;
 }
@@ -1916,42 +1892,30 @@ HTTPInfo::marshal(char *buf, int len)
   HTTPCacheAlt *marshal_alt = (HTTPCacheAlt *)buf;
   // non-zero only if the offsets are external. Otherwise they get
   // marshalled along with the alt struct.
-  int frag_len = (0 == m_alt->m_frag_offset_count || m_alt->m_frag_offsets == m_alt->m_integral_frag_offsets) ?
-                   0 :
-                   sizeof(HTTPCacheAlt::FragOffset) * m_alt->m_frag_offset_count;
+  size_t frag_len = m_alt->m_fragments ? FragmentDescriptorTable::calc_size(m_alt->m_fragments->m_n) : 0;
 
   ink_assert(m_alt->m_magic == CACHE_ALT_MAGIC_ALIVE);
 
   // Make sure the buffer is aligned
   //    ink_assert(((intptr_t)buf) & 0x3 == 0);
 
-  // If we have external fragment offsets, copy the initial ones
-  // into the integral data.
-  if (frag_len) {
-    memcpy(m_alt->m_integral_frag_offsets, m_alt->m_frag_offsets, sizeof(m_alt->m_integral_frag_offsets));
-    frag_len -= sizeof(m_alt->m_integral_frag_offsets);
-    // frag_len should never be non-zero at this point, as the offsets
-    // should be external only if too big for the internal table.
-  }
   // Memcpy the whole object so that we can use it
   //   live later.  This involves copying a few
   //   extra bytes now but will save copying any
   //   bytes on the way out of the cache
   memcpy(buf, m_alt, sizeof(HTTPCacheAlt));
   marshal_alt->m_magic = CACHE_ALT_MAGIC_MARSHALED;
-  marshal_alt->m_writeable = 0;
+  marshal_alt->m_flag.writeable_p = 0;
   marshal_alt->m_unmarshal_len = -1;
   marshal_alt->m_ext_buffer = NULL;
   buf += HTTP_ALT_MARSHAL_SIZE;
   used += HTTP_ALT_MARSHAL_SIZE;
 
   if (frag_len > 0) {
-    marshal_alt->m_frag_offsets = static_cast<FragOffset *>(reinterpret_cast<void *>(used));
-    memcpy(buf, m_alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, frag_len);
+    marshal_alt->m_fragments = static_cast<FragmentDescriptorTable *>(reinterpret_cast<void *>(used));
+    memcpy(buf, m_alt->m_fragments, frag_len);
     buf += frag_len;
     used += frag_len;
-  } else {
-    marshal_alt->m_frag_offsets = 0;
   }
 
   // The m_{request,response}_hdr->m_heap pointers are converted
@@ -1993,7 +1957,6 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
 
   if (alt->m_magic == CACHE_ALT_MAGIC_ALIVE) {
     // Already unmarshaled, must be a ram cache
-    //  it
     ink_assert(alt->m_unmarshal_len > 0);
     ink_assert(alt->m_unmarshal_len <= len);
     return alt->m_unmarshal_len;
@@ -2004,31 +1967,14 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
 
   ink_assert(alt->m_unmarshal_len < 0);
   alt->m_magic = CACHE_ALT_MAGIC_ALIVE;
-  ink_assert(alt->m_writeable == 0);
+  ink_assert(alt->m_flag.writeable_p == 0);
   len -= HTTP_ALT_MARSHAL_SIZE;
 
-  if (alt->m_frag_offset_count > HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS) {
-    // stuff that didn't fit in the integral slots.
-    int extra = sizeof(FragOffset) * alt->m_frag_offset_count - sizeof(alt->m_integral_frag_offsets);
-    char *extra_src = buf + reinterpret_cast<intptr_t>(alt->m_frag_offsets);
-    // Actual buffer size, which must be a power of two.
-    // Well, technically not, because we never modify an unmarshalled fragment
-    // offset table, but it would be a nasty bug should that be done in the
-    // future.
-    int bcount = HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS * 2;
-
-    while (bcount < alt->m_frag_offset_count)
-      bcount *= 2;
-    alt->m_frag_offsets =
-      static_cast<FragOffset *>(ats_malloc(bcount * sizeof(FragOffset))); // WRONG - must round up to next power of 2.
-    memcpy(alt->m_frag_offsets, alt->m_integral_frag_offsets, sizeof(alt->m_integral_frag_offsets));
-    memcpy(alt->m_frag_offsets + HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS, extra_src, extra);
-    len -= extra;
-  } else if (alt->m_frag_offset_count > 0) {
-    alt->m_frag_offsets = alt->m_integral_frag_offsets;
-  } else {
-    alt->m_frag_offsets = 0; // should really already be zero.
+  if (alt->m_fragments) {
+    alt->m_fragments = reinterpret_cast<FragmentDescriptorTable *>(buf + reinterpret_cast<intptr_t>(alt->m_fragments));
+    len -= FragmentDescriptorTable::calc_size(alt->m_fragments->m_n);
   }
+  alt->m_flag.table_allocated_p = false;
 
   HdrHeap *heap = (HdrHeap *)(alt->m_request_hdr.m_heap ? (buf + (intptr_t)alt->m_request_hdr.m_heap) : 0);
   HTTPHdrImpl *hh = NULL;
@@ -2044,6 +1990,7 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
     alt->m_request_hdr.m_http = hh;
     alt->m_request_hdr.m_mime = hh->m_fields_impl;
     alt->m_request_hdr.m_url_cached.m_heap = heap;
+    alt->m_request_hdr.mark_target_dirty();
   }
 
   heap = (HdrHeap *)(alt->m_response_hdr.m_heap ? (buf + (intptr_t)alt->m_response_hdr.m_heap) : 0);
@@ -2058,6 +2005,7 @@ HTTPInfo::unmarshal(char *buf, int len, RefCountObj *block_ref)
     alt->m_response_hdr.m_heap = heap;
     alt->m_response_hdr.m_http = hh;
     alt->m_response_hdr.m_mime = hh->m_fields_impl;
+    alt->m_response_hdr.mark_target_dirty();
   }
 
   alt->m_unmarshal_len = orig_len - len;
@@ -2078,7 +2026,7 @@ HTTPInfo::check_marshalled(char *buf, int len)
     return false;
   }
 
-  if (alt->m_writeable != false) {
+  if (alt->m_flag.writeable_p != false) {
     return false;
   }
 
@@ -2167,22 +2115,632 @@ HTTPInfo::get_handle(char *buf, int len)
   return -1;
 }
 
+HTTPInfo::FragmentDescriptor *
+HTTPInfo::force_frag_at(unsigned int idx)
+{
+  FragmentDescriptor *frag;
+  FragmentDescriptorTable *old_table = 0;
+
+  ink_assert(m_alt);
+  ink_assert(idx >= 0);
+
+  if (0 == idx)
+    return &m_alt->m_earliest;
+
+  if (0 == m_alt->m_fragments || idx > m_alt->m_fragments->m_n) { // no room at the inn
+    int64_t obj_size = this->object_size_get();
+    uint32_t ff_size = this->get_frag_fixed_size();
+    unsigned int n = 0; // set if we need to allocate, this is max array index needed.
+
+    ink_assert(ff_size);
+
+    if (0 == m_alt->m_fragments && obj_size > 0) {
+      n = (obj_size + ff_size - 1) / ff_size;
+      if (idx > n)
+        n = idx;
+      if (!m_alt->m_earliest.m_flag.cached_p)
+        ++n; // going to have an empty earliest fragment.
+    } else {
+      n = idx + MAX(4, idx >> 1); // grow by 50% and at least 4
+      old_table = m_alt->m_fragments;
+    }
+
+    size_t size = FragmentDescriptorTable::calc_size(n);
+    size_t old_size = 0;
+    unsigned int old_count = 0;
+    int64_t offset = 0;
+    CryptoHash key;
+
+    m_alt->m_fragments = static_cast<FragmentDescriptorTable *>(ats_malloc(size));
+    ink_zero(*(m_alt->m_fragments)); // just need to zero the base struct.
+    if (old_table) {
+      old_count = old_table->m_n;
+      frag = &((*old_table)[old_count]);
+      offset = frag->m_offset;
+      key = frag->m_key;
+      old_size = FragmentDescriptorTable::calc_size(old_count);
+      memcpy(m_alt->m_fragments, old_table, old_size);
+      if (m_alt->m_flag.table_allocated_p)
+        ats_free(old_table);
+    } else {
+      key = m_alt->m_earliest.m_key;
+      m_alt->m_fragments->m_cached_idx = 0;
+    }
+    m_alt->m_fragments->m_n = n;
+    m_alt->m_flag.table_allocated_p = true;
+    // fill out the new parts with offsets & keys.
+    ++old_count; // left as the index of the last frag in the previous set.
+    for (frag = &((*m_alt->m_fragments)[old_count]); old_count <= n; ++old_count, ++frag) {
+      key.next();
+      offset += ff_size;
+      frag->m_key = key;
+      frag->m_offset = offset;
+      frag->m_flags = 0;
+    }
+  }
+  ink_assert(idx > m_alt->m_fragments->m_cached_idx);
+  return &(*m_alt->m_fragments)[idx];
+}
+
 void
-HTTPInfo::push_frag_offset(FragOffset offset)
+HTTPInfo::mark_frag_write(unsigned int idx)
 {
   ink_assert(m_alt);
-  if (0 == m_alt->m_frag_offsets) {
-    m_alt->m_frag_offsets = m_alt->m_integral_frag_offsets;
-  } else if (m_alt->m_frag_offset_count >= HTTPCacheAlt::N_INTEGRAL_FRAG_OFFSETS &&
-             0 == (m_alt->m_frag_offset_count & (m_alt->m_frag_offset_count - 1))) {
-    // need more space than in integral storage and we're at an upgrade
-    // size (power of 2).
-    FragOffset *nf = static_cast<FragOffset *>(ats_malloc(sizeof(FragOffset) * (m_alt->m_frag_offset_count * 2)));
-    memcpy(nf, m_alt->m_frag_offsets, sizeof(FragOffset) * m_alt->m_frag_offset_count);
-    if (m_alt->m_frag_offsets != m_alt->m_integral_frag_offsets)
-      ats_free(m_alt->m_frag_offsets);
-    m_alt->m_frag_offsets = nf;
+  ink_assert(idx >= 0);
+
+  if (idx >= m_alt->m_frag_count)
+    m_alt->m_frag_count = idx + 1;
+
+  if (0 == idx) {
+    m_alt->m_earliest.m_flag.cached_p = true;
+  } else {
+    this->force_frag_at(idx)->m_flag.cached_p = true;
+  }
+
+  // bump the last cached value if possible and mark complete if appropriate.
+  if (m_alt->m_fragments && idx == m_alt->m_fragments->m_cached_idx + 1) {
+    unsigned int j = idx + 1;
+    while (j < m_alt->m_frag_count && (*m_alt->m_fragments)[j].m_flag.cached_p)
+      ++j;
+    m_alt->m_fragments->m_cached_idx = j - 1;
+    if (!m_alt->m_flag.content_length_p &&
+        (this->get_frag_fixed_size() + this->get_frag_offset(j - 1)) > static_cast<int64_t>(m_alt->m_earliest.m_offset))
+      m_alt->m_flag.complete_p = true;
+  }
+}
+
+int
+HTTPInfo::get_frag_index_of(int64_t offset)
+{
+  int zret = 0;
+  uint32_t ff_size = this->get_frag_fixed_size();
+  FragmentDescriptorTable *table = this->get_frag_table();
+  if (!table) {
+    // Never the case that we have an empty earliest fragment *and* no frag table.
+    zret = offset / ff_size;
+  } else {
+    FragmentDescriptorTable &frags = *table; // easier to work with.
+    int n = frags.m_n;                       // also the max valid frag table index and always >= 1.
+    // I should probably make @a m_offset int64_t to avoid casting issues like this...
+    uint64_t uoffset = static_cast<uint64_t>(offset);
+
+    if (uoffset >= frags[n].m_offset) {
+      // in or past the last fragment, compute the index by computing the # of @a ff_size chunks past the end.
+      zret = n + (static_cast<uint64_t>(offset) - frags[n].m_offset) / ff_size;
+    } else if (uoffset < frags[1].m_offset) {
+      zret = 0; // in the earliest fragment.
+    } else {
+      // Need to handle old data where the offsets are not guaranteed to be regular.
+      // So we start with our guess (which should be close) and if we're right, boom, else linear
+      // search which should only be 1 or 2 steps.
+      zret = offset / ff_size;
+      if (frags[1].m_offset == 0 || 0 == zret) // zret can be zero if the earliest frag is less than @a ff_size
+        ++zret;
+      while (0 < zret && zret < n) {
+        if (uoffset < frags[zret].m_offset) {
+          --zret;
+        } else if (uoffset >= frags[zret + 1].m_offset) {
+          ++zret;
+        } else {
+          break;
+        }
+      }
+    }
+  }
+  return zret;
+}
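
// Worked example of the mapping above (assuming a hypothetical fixed fragment
// size of 1048576 bytes and regularly spaced offsets): offset 3145828, which
// is 3 * 1048576 + 100, maps to fragment index 3 by plain integer division;
// the short linear walk only matters for older alternates whose stored
// offsets are not evenly spaced.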
+/***********************************************************************
+ *                                                                     *
+ *                      R A N G E   S U P P O R T                      *
+ *                                                                     *
+ ***********************************************************************/
+
+namespace
+{
+// Need to promote this out of here at some point.
+// This parses a 64-bit unsigned integer from a string, limiting the number of digits to avoid overflow.
+struct integer {
+  static size_t const MAX_DIGITS = 15;
+  static bool
+  parse(ts::ConstBuffer const &b, uint64_t &result)
+  {
+    bool zret = false;
+    if (0 < b.size() && b.size() <= MAX_DIGITS) {
+      size_t n;
+      result = ats_strto64(b.data(), b.size(), &n);
+      zret = n == b.size();
+    }
+    return zret;
+  }
+};
+}
+
+bool
+HTTPRangeSpec::parseRangeFieldValue(char const *v, int len)
+{
+  // Maximum # of digits permitted for an offset. Avoid issues with overflow.
+  static size_t const MAX_DIGITS = 15;
+  ts::ConstBuffer src(v, len);
+  size_t n;
+
+  _state = INVALID;
+  src.skip(&ParseRules::is_ws);
+
+  if (src.size() > sizeof(HTTP_LEN_BYTES) + 1 && 0 == strncasecmp(src.data(), HTTP_VALUE_BYTES, HTTP_LEN_BYTES) &&
+      '=' == src[HTTP_LEN_BYTES]) {
+    src += HTTP_LEN_BYTES + 1;
+    while (src) {
+      ts::ConstBuffer max = src.splitOn(',');
+
+      if (!max) { // no comma so everything in @a src should be processed as a single range.
+        max = src;
+        src.reset();
+      }
+
+      ts::ConstBuffer min = max.splitOn('-');
+
+      src.skip(&ParseRules::is_ws);
+      // Spec forbids whitespace anywhere in the range element.
+
+      if (min) {
+        if (ParseRules::is_digit(*min) && min.size() <= MAX_DIGITS) {
+          uint64_t low = ats_strto64(min.data(), min.size(), &n);
+          if (n < min.size())
+            break; // extra cruft in range, not even ws allowed
+          if (max) {
+            if (ParseRules::is_digit(*max) && max.size() <= MAX_DIGITS) {
+              uint64_t high = ats_strto64(max.data(), max.size(), &n);
+              if (n < max.size() && (max += n).skip(&ParseRules::is_ws))
+                break; // non-ws cruft after maximum
+              else
+                this->add(low, high);
+            } else {
+              break; // invalid characters for maximum
+            }
+          } else {
+            this->add(low, UINT64_MAX); // "X-" : "offset X to end of content"
+          }
+        } else {
+          break; // invalid characters for minimum
+        }
+      } else {
+        if (max) {
+          if (ParseRules::is_digit(*max) && max.size() <= MAX_DIGITS) {
+            uint64_t high = ats_strto64(max.data(), max.size(), &n);
+            if (n < max.size() && (max += n).skip(&ParseRules::is_ws)) {
+              break; // cruft after end of maximum
+            } else {
+              this->add(high, 0);
+            }
+          } else {
+            break; // invalid maximum
+          }
+        }
+      }
+    }
+    if (src)
+      _state = INVALID; // didn't parse everything, must have been an error.
+  }
+  return _state != INVALID;
+}
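
A usage sketch for the parser above on a well formed field value (the variable is illustrative). Note that
a suffix range such as "-200" is stored as (200, 0) and an open-ended range as (X, UINT64_MAX) until
apply() converts them to absolute offsets:

    HTTPRangeSpec spec;
    bool ok = spec.parseRangeFieldValue("bytes=0-99,500-,-200", 20);
    // Expected, assuming the parse succeeds:
    //   ok == true, spec.isMulti(), spec.count() == 3
    //   spec[0] == {0, 99}            absolute range
    //   spec[1] == {500, UINT64_MAX}  "500-" : offset 500 to end of content
    //   spec[2] == {200, 0}           "-200" : suffix, the last 200 bytes
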
+
+HTTPRangeSpec &
+HTTPRangeSpec::add(Range const &r)
+{
+  if (MULTI == _state) {
+    _ranges.push_back(r);
+  } else if (SINGLE == _state) {
+    _ranges.push_back(_single);
+    _ranges.push_back(r);
+    _state = MULTI;
+  } else {
+    _single = r;
+    _state = SINGLE;
+  }
+  return *this;
+}
+
+bool
+HTTPRangeSpec::apply(uint64_t len)
+{
+  if (!this->hasRanges()) {
+    // nothing - simplifying later logic.
+  } else if (0 == len) {
+    /* Must special case zero length content
+       - suffix ranges are OK but other ranges are not.
+       - Best option is to return a 200 (not 206 or 416) for any suffix range spec on zero length content.
+         (this is what Apache HTTPD does)
+       - So, mark result as either @c UNSATISFIABLE or @c EMPTY, clear all ranges.
+    */
+    _state = EMPTY;
+    if (!_single.isSuffix())
+      _state = UNSATISFIABLE;
+    for (RangeBox::iterator spot = _ranges.begin(), limit = _ranges.end(); spot != limit && EMPTY == _state; ++spot) {
+      if (!spot->isSuffix())
+        _state = UNSATISFIABLE;
+    }
+    _ranges.clear();
+  } else if (this->isSingle()) {
+    if (!_single.apply(len))
+      _state = UNSATISFIABLE;
+  } else { // gotta be MULTI
+    int src = 0, dst = 0;
+    int n = _ranges.size();
+    while (src < n) {
+      Range &r = _ranges[src];
+      if (r.apply(len)) {
+        if (src != dst)
+          _ranges[dst] = r;
+        ++dst;
+      }
+      ++src;
+    }
+    // at this point, @a dst is the # of valid ranges.
+    if (dst > 0) {
+      _single = _ranges[0];
+      if (dst == 1)
+        _state = SINGLE;
+      _ranges.resize(dst);
+    } else {
+      _state = UNSATISFIABLE;
+      _ranges.clear();
+    }
+  }
+  return this->isValid();
+}
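
Continuing the parse sketch above, applying a concrete content length makes every surviving range absolute
and clipped; the values shown are what the logic above produces for a 1000-byte object (setup illustrative):

    // spec holds {0, 99}, {500, UINT64_MAX}, {200, 0} from the parse sketch.
    spec.apply(1000);
    // Afterwards:
    //   spec[0] == {0, 99}     unchanged
    //   spec[1] == {500, 999}  clipped to the last byte
    //   spec[2] == {800, 999}  the suffix "-200" maps to the last 200 bytes
    // A range starting at or beyond offset 1000 would have been dropped; if no
    // range survived, the state would become UNSATISFIABLE.
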
+
+static ts::ConstBuffer const MULTIPART_BYTERANGE("multipart/byteranges", 20);
+static ts::ConstBuffer const MULTIPART_BOUNDARY("boundary", 9);
+
+int64_t
+HTTPRangeSpec::parseContentRangeFieldValue(char const *v, int len, Range &r, ts::ConstBuffer &boundary)
+{
+  // [amc] TBD - handle the multipart/byteranges syntax.
+  ts::ConstBuffer src(v, len);
+  int64_t zret = -1;
+
+  r.invalidate();
+  src.skip(&ParseRules::is_ws);
+
+  if (src.skipNoCase(MULTIPART_BYTERANGE)) {
+    while (src && (';' == *src || ParseRules::is_ws(*src)))
+      ++src;
+    if (src.skipNoCase(MULTIPART_BOUNDARY)) {
+      src.trim(&ParseRules::is_ws);
+      boundary = src;
+    }
+  } else if (src.size() > sizeof(HTTP_LEN_BYTES) + 1 && 0 == strncasecmp(src.data(), HTTP_VALUE_BYTES, HTTP_LEN_BYTES) &&
+             ParseRules::is_ws(src[HTTP_LEN_BYTES]) // must have white space
+             ) {
+    uint64_t cl, low, high;
+    bool unsatisfied_p = false, indeterminate_p = false;
+    ts::ConstBuffer min, max;
+
+    src += HTTP_LEN_BYTES;
+    src.skip(&ParseRules::is_ws); // but can have any number
+
+    max = src.splitOn('/'); // src has total length value
+
+    if (max.size() == 1 && *max == '*')
+      unsatisfied_p = true;
+    else
+      min = max.splitOn('-');
+
+    src.trim(&ParseRules::is_ws);
+    if (src && src.size() == 1 && *src == '*')
+      indeterminate_p = true;
+
+    // note - spec forbids internal spaces so it's "X-Y/Z" w/o whitespace.
+    // spec also says we can have "*/Z" or "X-Y/*" but never "*/*".
+
+    if (!(indeterminate_p && unsatisfied_p) && (indeterminate_p || integer::parse(src, cl)) &&
+        (unsatisfied_p || (integer::parse(min, low) && integer::parse(max, high)))) {
+      if (!unsatisfied_p)
+        r._min = low, r._max = high;
+      if (!indeterminate_p)
+        zret = static_cast<int64_t>(cl);
+    }
+  }
+  return zret;
+}
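
Some example Content-Range field values and what the parser above reports for them (the return value is
the content length, or -1 when it is indeterminate or there is a parse error; setup illustrative):

    HTTPRangeSpec::Range r;
    ts::ConstBuffer boundary;
    int64_t cl = HTTPRangeSpec::parseContentRangeFieldValue("bytes 0-99/1000", 15, r, boundary);
    // "bytes 0-99/1000" -> cl == 1000, r == {0, 99}
    // "bytes */1000"    -> cl == 1000, r invalid (unsatisfiable)
    // "bytes 0-99/*"    -> cl == -1,   r == {0, 99} (indeterminate length)
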
+
+namespace
+{
+int
+Calc_Digital_Length(uint64_t x)
+{
+  char buff[32]; // big enough for 64 bit #
+  return snprintf(buff, sizeof(buff), "%" PRIu64, x);
+}
+}
+
+uint64_t
+HTTPRangeSpec::calcPartBoundarySize(uint64_t object_size, uint64_t ct_val_len)
+{
+  size_t l_size = Calc_Digital_Length(object_size);
+  // CR LF "--" boundary-string CR LF "Content-Range" ": " "bytes " X "-" Y "/" Z CR LF Content-Type CR LF
+  uint64_t zret =
+    4 + HTTP_RANGE_BOUNDARY_LEN + 2 + MIME_LEN_CONTENT_RANGE + 2 + HTTP_LEN_BYTES + 1 + l_size + 1 + l_size + 1 + l_size + 2;
+  if (ct_val_len)
+    zret += MIME_LEN_CONTENT_TYPE + 2 + ct_val_len + 2;
+  return zret;
+}
+
+uint64_t
+HTTPRangeSpec::calcContentLength(uint64_t object_size, uint64_t ct_val_len) const
+{
+  uint64_t size = object_size;
+  size_t nr = this->count();
+
+  if (nr >= 1) {
+    size = this->size();                                                    // the real content size.
+    if (nr > 1)                                                             // part boundaries
+      size += nr * self::calcPartBoundarySize(object_size, ct_val_len) + 2; // need trailing '--'
+  }
+  return size;
+}
+
+uint64_t
+HTTPRangeSpec::writePartBoundary(MIOBuffer *out, char const *boundary_str, size_t boundary_len, uint64_t total_size, uint64_t low,
+                                 uint64_t high, MIMEField *ctf, bool final)
+{
+  size_t x;                                                  // tmp for printf results.
+  size_t loc_size = Calc_Digital_Length(total_size) * 3 + 3; // precomputed size of all the location / size text.
+  size_t n = self::calcPartBoundarySize(total_size, ctf ? ctf->m_len_value : 0) + (final ? 2 : 0);
+  Ptr<IOBufferData> d(new_IOBufferData(iobuffer_size_to_index(n, MAX_BUFFER_SIZE_INDEX), MEMALIGNED));
+  char *spot = d->data();
+
+  x = snprintf(spot, n, "\r\n--%.*s", static_cast<int>(boundary_len), boundary_str);
+  spot += x;
+  n -= x;
+  if (final) {
+    memcpy(spot, "--", 2);
+    spot += 2;
+    n -= 2;
+  }
+
+  x = snprintf(spot, n, "\r\n%.*s: %.*s", MIME_LEN_CONTENT_RANGE, MIME_FIELD_CONTENT_RANGE, HTTP_LEN_BYTES, HTTP_VALUE_BYTES);
+  spot += x;
+  n -= x;
+  spot[-HTTP_LEN_BYTES] = tolower(spot[-HTTP_LEN_BYTES]); // ugly cleanup just to be careful of stupid user agents.
+
+  x = snprintf(spot, n, " %" PRIu64 "-%" PRIu64 "/%" PRIu64, low, high, total_size);
+  // Need to space fill to match pre-computed size
+  if (x < loc_size)
+    memset(spot + x, ' ', loc_size - x);
+  spot += loc_size;
+  n -= loc_size;
+
+  if (ctf) {
+    int ctf_len;
+    char const *ctf_val = ctf->value_get(&ctf_len);
+    if (ctf_val) {
+      x = snprintf(spot, n, "\r\n%.*s: %.*s", MIME_LEN_CONTENT_TYPE, MIME_FIELD_CONTENT_TYPE, ctf_len, ctf_val);
+      spot += x;
+      n -= x;
+    }
+  }
+
+  // This also takes care of the snprintf null termination problem.
+  *spot++ = '\r';
+  *spot++ = '\n';
+  n -= 2;
+
+  ink_assert(n == 0);
+
+  IOBufferBlock *b = new_IOBufferBlock(d, spot - d->data());
+  b->_buf_end = b->_end;
+  out->append_block(b);
+
+  return spot - d->data();
+}
+
+int
+HTTPRangeSpec::print_array(char *buff, size_t len, Range const *rv, int count)
+{
+  size_t zret = 0;
+  bool first = true;
+
+  // Can't possibly write a range in less than this size buffer.
+  if (len < static_cast<size_t>(HTTP_LEN_BYTES) + 4)
+    return 0;
+
+  for (int i = 0; i < count; ++i) {
+    int n;
+
+    if (first) {
+      memcpy(buff, HTTP_VALUE_BYTES, HTTP_LEN_BYTES);
+      buff[HTTP_LEN_BYTES] = '=';
+      zret += HTTP_LEN_BYTES + 1;
+      first = false;
+    } else if (len < zret + 4) {
+      break;
+    } else {
+      buff[zret++] = ',';
+    }
+
+    n = snprintf(buff + zret, len - zret, "%" PRIu64 "-%" PRIu64, rv[i]._min, rv[i]._max);
+    if (n + zret >= len)
+      break; // ran out of room
+    else
+      zret += n;
+  }
+  return zret;
+}
+
+int
+HTTPRangeSpec::print(char *buff, size_t len) const
+{
+  return this->hasRanges() ? this->print_array(buff, len, &(*(this->begin())), this->count()) : 0;
+}
+
+int
+HTTPRangeSpec::print_quantized(char *buff, size_t len, int64_t quantum, int64_t interstitial) const
+{
+  static const int MAX_R = 20; // this needs to be promoted
+  // We will want to have a max # of ranges limit, probably a build time constant, in the not so distant
+  // future anyway, so might as well start here.
+  int qrn = 0;     // count of quantized ranges
+  Range qr[MAX_R]; // quantized ranges
+
+  // Can't possibly write a range in less than this size buffer.
+  if (len < static_cast<size_t>(HTTP_LEN_BYTES) + 4)
+    return 0;
+
+  // Avoid annoying "+1" in the adjacency checks.
+  if (interstitial < 1)
+    interstitial = 1;
+  else
+    ++interstitial;
+
+  for (const_iterator spot = this->begin(), limit = this->end(); spot != limit; ++spot) {
+    Range r(*spot);
+    int i;
+    if (quantum > 1) {
+      r._min = (r._min / quantum) * quantum;
+      r._max = ((r._max + quantum - 1) / quantum) * quantum - 1;
+    }
+    // blend in to the current ranges
+    for (i = 0; i < qrn; ++i) {
+      Range &cr = qr[i];
+      if ((r._max + interstitial) < cr._min) {
+        memmove(qr, qr + 1, sizeof(*qr) * qrn);
+        ++qrn;
+        qr[0] = r;
+        i = -1;
+        break;
+      } else if (cr._max + interstitial >= r._min) {
+        int j = i + 1;
+        cr._min = std::min(cr._min, r._min);
+        cr._max = std::max(cr._max, r._max);
+        while (j < qrn) {
+          if (qr[j]._min < cr._max + interstitial)
+            cr._max = std::max(cr._max, qr[j]._max);
+          ++j;
+        }
+        if (j < qrn)
+          memmove(qr + i + 1, qr + j, sizeof(*qr) * (qrn - j));
+        qrn -= j - i;
+        i = -1;
+        break;
+      }
+    }
+    if (i >= qrn)
+      qr[qrn++] = r;
+    ink_assert(qrn <= MAX_R);
+  }
+
+  return this->print_array(buff, len, qr, qrn);
+}
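
The per-range quantization above rounds each requested range outward to quantum boundaries before any
coalescing is attempted; for example, with a quantum of 100 the request 120-250 widens as follows:

    // quantum == 100, incoming range {120, 250}
    //   _min = (120 / 100) * 100                  = 100
    //   _max = ((250 + 100 - 1) / 100) * 100 - 1  = 299
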
+
+HTTPRangeSpec::Range
+HTTPInfo::get_range_for_frags(int low, int high)
+{
+  HTTPRangeSpec::Range zret;
+  zret._min = low < 1 ? 0 : (*m_alt->m_fragments)[low].m_offset;
+  zret._max =
+    (high >= static_cast<int>(m_alt->m_frag_count) - 1 ? this->object_size_get() : (*m_alt->m_fragments)[high + 1].m_offset) - 1;
+  return zret;
+}
+
+/* Note - we're not handling unspecified content length and trailing segments at all here.
+   Must deal with that at some point.
+*/
+
+HTTPRangeSpec::Range
+HTTPInfo::get_uncached_hull(HTTPRangeSpec const &req, int64_t initial)
+{
+  HTTPRangeSpec::Range r;
+
+  if (m_alt && !m_alt->m_flag.complete_p) {
+    HTTPRangeSpec::Range s = req.getConvexHull();
+    if (m_alt->m_fragments) {
+      FragmentDescriptorTable &fdt = *(m_alt->m_fragments);
+      int32_t lidx;
+      int32_t ridx;
+      if (s.isValid()) {
+        lidx = this->get_frag_index_of(s._min);
+        ridx = this->get_frag_index_of(s._max);
+      } else { // not a range request, get hull of all uncached fragments
+        lidx = fdt.m_cached_idx + 1;
+        // This really isn't valid if !content_length_p, need to deal with that at some point.
+        ridx = this->get_frag_index_of(this->object_size_get());
+      }
+
+      if (lidx < 2 && !m_alt->m_earliest.m_flag.cached_p)
+        lidx = 0;
+      else {
+        if (0 == lidx)
+          ++lidx; // because if we get here with lidx == 0, earliest is cached and we should skip ahead.
+        while (lidx <= ridx && fdt[lidx].m_flag.cached_p)
+          ++lidx;
+      }
+
+      while (lidx <= ridx && fdt[ridx].m_flag.cached_p)
+        --ridx;
+
+      if (lidx <= ridx)
+        r = this->get_range_for_frags(lidx, ridx);
+    } else { // no fragments past earliest cached yet
+      r._min = m_alt->m_earliest.m_flag.cached_p ? this->get_frag_fixed_size() : 0;
+      if (s.isValid()) {
+        r._min = std::max(r._min, s._min);
+        r._max = s._max;
+      } else {
+        r._max = INT64_MAX;
+      }
+    }
+    if (r.isValid() && m_alt->m_flag.content_length_p && static_cast<int64_t>(r._max) > this->object_size_get())
+      r._max = this->object_size_get();
+    if (static_cast<int64_t>(r._min) < initial && !m_alt->m_earliest.m_flag.cached_p)
+      r._min = 0;
   }
+  return r;
+}
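
A qualitative trace of the hull computation above (fragment indices are hypothetical):

    // Cached: earliest (frag 0) and frag 1.  Uncached: frags 2..4.
    // Request convex hull spans frags 0..4.
    //   lidx: starts at 0; earliest is cached so it is bumped to 1, then walks
    //         past the cached frag 1 to settle on 2.
    //   ridx: stays at 4 (frag 4 is uncached).
    //   result: get_range_for_frags(2, 4), i.e. bytes from the start of fragment 2
    //           through the end of fragment 4.
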
 
-  m_alt->m_frag_offsets[m_alt->m_frag_offset_count++] = offset;
+#if 0
+bool
+HTTPInfo::get_uncached(HTTPRangeSpec const& req, HTTPRangeSpec& result)
+{
+  bool zret = false;
+  if (m_alt && !m_alt->m_flag.complete_p) {
+    FragmentAccessor frags(m_alt);
+
+    for ( HTTPRangeSpec::const_iterator spot = req.begin(), limit = req.end() ; spot != limit ; ++spot ) {
+      int32_t lidx = this->get_frag_index_of(spot->_min);
+      int32_t ridx = this->get_frag_index_of(spot->_max);
+      while (lidx <= ridx && frags[lidx].m_flag.cached_p)
+        ++lidx;
+      if (lidx > ridx) continue; // All of this range is present.
+      while (lidx <= ridx && frags[ridx].m_flag.cached_p) // must hit missing frag at lhs at the latest
+        --ridx;
+
+      if (lidx <= ridx) {
+        result.add(this->get_range_for_frags(lidx, ridx));
+        zret = true;
+      }
+    }
+  }
+  return zret;
 }
+#endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/hdrs/HTTP.h
----------------------------------------------------------------------
diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h
index 4c80bcc..cb6b4fd 100644
--- a/proxy/hdrs/HTTP.h
+++ b/proxy/hdrs/HTTP.h
@@ -1,32 +1,33 @@
 /** @file
 
-  A brief file description
+    A brief file description
 
-  @section license License
+    @section license License
 
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
 
-      http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
- */
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+*/
 
 #ifndef __HTTP_H__
 #define __HTTP_H__
 
 #include <assert.h>
+#include <vector>
 #include "Arena.h"
-#include "INK_MD5.h"
+#include "CryptoHash.h"
 #include "MIME.h"
 #include "URL.h"
 
@@ -37,6 +38,11 @@
 #define HTTP_MAJOR(v) (((v) >> 16) & 0xFFFF)
 
 class Http2HeaderTable;
+class MIOBuffer;
+namespace ts
+{
+struct ConstBuffer;
+}
 
 enum HTTPStatus {
   HTTP_STATUS_NONE = 0,
@@ -416,6 +422,8 @@ extern int HTTP_LEN_S_MAXAGE;
 extern int HTTP_LEN_NEED_REVALIDATE_ONCE;
 extern int HTTP_LEN_100_CONTINUE;
 
+static size_t const HTTP_RANGE_BOUNDARY_LEN = 32 + 2 + 16;
+
 /* Private */
 void http_hdr_adjust(HTTPHdrImpl *hdrp, int32_t offset, int32_t length, int32_t delta);
 
@@ -463,13 +471,13 @@ int32_t http_parse_version(const char *start, const char *end);
 
 
 /*
-HTTPValAccept*         http_parse_accept (const char *buf, Arena *arena);
-HTTPValAcceptCharset*  http_parse_accept_charset (const char *buf, Arena *arena);
-HTTPValAcceptEncoding* http_parse_accept_encoding (const char *buf, Arena *arena);
-HTTPValAcceptLanguage* http_parse_accept_language (const char *buf, Arena *arena);
-HTTPValCacheControl*   http_parse_cache_control (const char *buf, Arena *arena);
-const char*            http_parse_cache_directive (const char **buf);
-HTTPValRange*          http_parse_range (const char *buf, Arena *arena);
+  HTTPValAccept*         http_parse_accept (const char *buf, Arena *arena);
+  HTTPValAcceptCharset*  http_parse_accept_charset (const char *buf, Arena *arena);
+  HTTPValAcceptEncoding* http_parse_accept_encoding (const char *buf, Arena *arena);
+  HTTPValAcceptLanguage* http_parse_accept_language (const char *buf, Arena *arena);
+  HTTPValCacheControl*   http_parse_cache_control (const char *buf, Arena *arena);
+  const char*            http_parse_cache_directive (const char **buf);
+  HTTPValRange*          http_parse_range (const char *buf, Arena *arena);
 */
 HTTPValTE *http_parse_te(const char *buf, int len, Arena *arena);
 
@@ -496,6 +504,235 @@ public:
   int32_t m_version;
 };
 
+/** A set of content ranges.
+
+    This represents the data for an HTTP range specification.
+    On a request this contains the requested ranges. On a response it contains the actual ranges in the
+    response, which are the requested ranges adjusted for the actual content length.
+*/
+struct HTTPRangeSpec {
+  typedef HTTPRangeSpec self;
+
+  /** A range of bytes in an object.
+
+      If @a _min > 0 and @a _max == 0 the range is backwards and counts from the
+      end of the object. That is (100,0) means the last 100 bytes of content.
+  */
+  struct Range {
+    uint64_t _min;
+    uint64_t _max;
+
+    /// Default constructor - invalid range.
+    Range() : _min(UINT64_MAX), _max(1) {}
+    /// Construct as the range ( @a low .. @a high )
+    Range(uint64_t low, uint64_t high) : _min(low), _max(high) {}
+
+    /// Test if this range is a suffix range.
+    bool isSuffix() const;
+    /// Test if this range is a valid range.
+    bool isValid() const;
+    /// Get the size (in bytes) of the range.
+    uint64_t size() const;
+    /** Convert range to absolute values for a content length of @a len.
+
+        @return @c true if the range was valid for @a len, @c false otherwise.
+    */
+    bool apply(uint64_t len);
+
+    /// Force the range to an empty state.
+    Range &invalidate();
+  };
+
+  /// Range iteration type.
+  typedef Range *iterator;
+  typedef Range const *const_iterator;
+
+  /// Current state of the overall specification.
+  /// @internal We can distinguish between @c SINGLE and @c MULTI by looking at the
+  /// size of @a _ranges but we need this to mark @c EMPTY vs. not.
+  enum State {
+    EMPTY,         ///< No range.
+    INVALID,       ///< Range parsing failed.
+    UNSATISFIABLE, ///< Content length application failed.
+    SINGLE,        ///< Single range.
+    MULTI,         ///< Multiple ranges.
+  } _state;
+
+  /// The first range value.
+  /// By separating this out we can avoid allocation in the case of a single
+  /// range value, which is by far the most common ( > 99% in my experience).
+  Range _single;
+  /// Storage for range values.
+  typedef std::vector<Range> RangeBox;
+  /// The first range is copied here if there is more than one (to simplify).
+  RangeBox _ranges;
+
+  /// Default constructor - empty range
+  HTTPRangeSpec();
+
+  /// Reset to re-usable state.
+  void clear();
+
+  /** Parse a Range field @a value and update @a this with the results.
+      @return @c true if @a value was a valid range specifier, @c false otherwise.
+  */
+  bool parseRangeFieldValue(char const *value, int len);
+
+  /** Parse a Content-Range field @a value.
+
+      @a r is set to the content range. If the content range is unsatisfied or there is a parse error,
+      @a r is set to be invalid.
+
+      @note The content length return value is ambiguous on its own; the state of @a r must also be checked.
+
+      - Multipart: @a boundary is not empty
+      - Parse error: @a CL == -1 and @a r is invalid
+      - Unsatisfiable: @a CL >= 0 and @a r is invalid
+      - Indeterminate: @a CL == -1 and @a r is valid
+
+      @return The content length, or -1 if there is an error or the content length is indeterminate.
+  */
+  static int64_t parseContentRangeFieldValue(char const *value, int len, Range &r, ts::ConstBuffer &boundary);
+
+  /// Print the range specification.
+  /// @return The number of characters printed.
+  int print(char *buff ///< Output buffer.
+            ,
+            size_t len ///< Size of output buffer.
+            ) const;
+
+  /// Print the range specification quantized.
+  /// @return The number of characters printed.
+  int print_quantized(char *buff ///< Output buffer.
+                      ,
+                      size_t len ///< Size of output buffer.
+                      ,
+                      int64_t quantum ///< Align ranges to multiples of this value.
+                      ,
+                      int64_t interstitial ///< Require gaps to be at least this large.
+                      ) const;
+
+  /// Print the @a ranges.
+  /// @return The number of characters printed.
+  static int print_array(char *buff ///< Output buffer.
+                         ,
+                         size_t len ///< Size of output buffer.
+                         ,
+                         Range const *ranges ///< Array of ranges
+                         ,
+                         int count ///< # of ranges
+                         );
+
+#if 0
+  /** Copy ranges from @a that while applying them to the content @a length.
+
+      Ranges are copied if valid for @a length and converted to absolute offsets. The number of ranges
+      after application may be less than the @a src number of ranges. In addition ranges will be clipped
+      to @a length. 
+
+      @return @c true if the range spec is satisfiable, @c false otherwise.
+      Note a range spec with no ranges is always satisfiable and that suffix ranges are also
+      always satisfiable.
+  */
+  bool apply(self const& that, uint64_t length);
+#endif
+
+  /** Update ranges to be absolute based on content @a length.
+
+      Invalid ranges are removed, ranges will be clipped as needed, and suffix ranges will be
+      converted to absolute ranges.
+
+      @return @c true if the range spec is satisfiable (there remains at least one valid range), @c false otherwise.
+      Note a range spec with no ranges is always satisfiable and that suffix ranges are also
+      always satisfiable.
+  */
+  bool apply(uint64_t length);
+
+  /** Number of distinct ranges.
+      @return Number of ranges.
+  */
+  size_t count() const;
+
+  /// Get the size (in bytes) of the ranges.
+  uint64_t size() const;
+
+  /// Test if this is a valid single range specification.
+  bool isSingle() const;
+
+  /// Test if this is a valid multi range specification.
+  bool isMulti() const;
+
+  /// Test if this contains at least one valid range.
+  bool hasRanges() const;
+
+  /// Test if this is a well formed range (may be empty).
+  bool isValid() const;
+
+  /// Test if this is a valid but empty range spec.
+  bool isEmpty() const;
+
+  /// Test if this is an unsatisfied range.
+  bool isUnsatisfied() const;
+
+  /// Access the range at index @a n.
+  Range &operator[](int n);
+
+  /// Access the range at index @a n.
+  Range const &operator[](int n) const;
+
+  /// Calculate the convex hull of the range spec.
+  /// The convex hull is the smallest single range that contains all of the ranges in the range spec.
+  /// @note This will return an invalid range if there are no ranges in the range spec.
+  /// @see HTTPRangeSpec::Range::isValid
+  Range getConvexHull() const;
+
+  /** Calculate the content length for this range specification.
+
+      @note If a specific content length has not been @c apply 'd this will not produce
+      a usable result.
+
+      @return The content length for the ranges including the range separators.
+  */
+  uint64_t calcContentLength(uint64_t base_content_size, ///< Content size w/o ranges.
+                             uint64_t ct_val_len         ///< Length of Content-Type field value.
+                             ) const;
+
+  /// Calculate the length of the range part boundary header.
+  static uint64_t calcPartBoundarySize(uint64_t object_size ///< Base content size
+                                       ,
+                                       uint64_t ct_val_len ///< Length of the Content-Type value (0 if none).
+                                       );
+
+  /** Write the range part boundary to @a out.
+   */
+  static uint64_t writePartBoundary(MIOBuffer *out ///< Output IO Buffer
+                                    ,
+                                    char const *boundary_str ///< Boundary marker string.
+                                    ,
+                                    size_t boundary_len ///< Length of boundary marker string.
+                                    ,
+                                    uint64_t total_size ///< Base content size.
+                                    ,
+                                    uint64_t low ///< Low value for the range.
+                                    ,
+                                    uint64_t high ///< High value for the range.
+                                    ,
+                                    MIMEField *ctf ///< Content-Type field (@c NULL if none)
+                                    ,
+                                    bool final ///< Is this the final part boundary?
+                                    );
+
+  /// Iterator for first range.
+  iterator begin();
+  const_iterator begin() const;
+  /// Iterator past last range.
+  iterator end();
+  const_iterator end() const;
+
+  self &add(uint64_t low, uint64_t high);
+  self &add(Range const &r);
+};
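
A sketch of the intended request-side flow for this class (the surrounding variables and the error
handling are illustrative, not part of this patch):

    HTTPRangeSpec spec;
    if (spec.parseRangeFieldValue(range_field_value, range_field_len) && spec.apply(content_length)) {
      // spec now holds absolute, clipped ranges (or is EMPTY if there was no usable Range header).
      for (HTTPRangeSpec::const_iterator r = spec.begin(); r != spec.end(); ++r) {
        // serve bytes r->_min .. r->_max (inclusive) of the object
      }
    } else {
      // INVALID or UNSATISFIABLE - e.g. respond with 416 or ignore the Range header.
    }
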
+
 class IOBufferReader;
 
 class HTTPHdr : public MIMEHdr
@@ -631,6 +868,7 @@ public:
 
   const char *reason_get(int *length);
   void reason_set(const char *value, int length);
+  void reason_set(HTTPStatus status);
 
   MIMEParseResult parse_req(HTTPParser *parser, const char **start, const char *end, bool eof);
   MIMEParseResult parse_resp(HTTPParser *parser, const char **start, const char *end, bool eof);
@@ -1243,6 +1481,16 @@ HTTPHdr::reason_set(const char *value, int length)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
+inline void
+HTTPHdr::reason_set(HTTPStatus status)
+{
+  char const *phrase = http_hdr_reason_lookup(status);
+  this->reason_set(phrase, strlen(phrase));
+}
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+
 inline MIMEParseResult
 HTTPHdr::parse_req(HTTPParser *parser, const char **start, const char *end, bool eof)
 {
@@ -1316,34 +1564,114 @@ HTTPHdr::scheme_get(int *length)
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
-enum {
-  CACHE_ALT_MAGIC_ALIVE = 0xabcddeed,
-  CACHE_ALT_MAGIC_MARSHALED = 0xdcbadeed,
-  CACHE_ALT_MAGIC_DEAD = 0xdeadeed,
-};
+enum { CACHE_ALT_MAGIC_ALIVE = 0xabcddeed, CACHE_ALT_MAGIC_MARSHALED = 0xdcbadeed, CACHE_ALT_MAGIC_DEAD = 0xdeadeed };
 
-// struct HTTPCacheAlt
+/// Header for an alternate of an object.
+/// This is close to a POD, all the real API is in the @c HTTPInfo class.
+/// @note THIS IS DIRECTLY SERIALIZED TO DISK
+/// (after some tweaks, but any member in this struct will be written to disk)
 struct HTTPCacheAlt {
+  /// Information about a fragment in this alternate.
+  /// @internal Currently @c Dir has only 40 bits for the disk offset of a fragment,
+  /// and since no object (or alternate) is split across stripes (and thence disks)
+  /// no fragment can have an internal offset more than 40 bits long, so 48 bits
+  /// should suffice here.
+  struct FragmentDescriptor {
+    CryptoHash m_key;       ///< Key for fragment.
+    uint64_t m_offset : 48; ///< Starting offset of fragment in object.
+    union {
+      uint16_t m_flags;
+      struct {
+        unsigned int cached_p : 1; ///< Presence bit (is fragment in cache?)
+        unsigned int zero : 15;    ///< Zero fill for future use.
+      } m_flag;
+    };
+  };
+
+  /** Holds the table of fragment descriptors.
+
+      @internal To avoid allocating 2 chunks of memory we hang the descriptors off the end of this structure and provide
+      a method to do the size calculation. @a m_n contains the number of descriptors; the actual byte size must be
+      computed from that. The count of valid entries is held in this structure, not in the table, because it makes
+      serialization easier.  We don't serialize the explicit contents of the table struct (e.g., the capacity / @a
+      m_n value), only the descriptors.
+  */
+  struct FragmentDescriptorTable {
+    /** The number of entries in the table.
+        Because this is a 1 based array, this is also the largest valid index.
+        @note It is 1 less than the total number of fragment descriptors because earliest is stored
+        directly and not in this table.
+     */
+    uint32_t m_n;
+
+    /** Fragment index of last initial segment cached.
+
+        All fragments from the earliest to this are in cache.
+
+        @note A simple effort to minimize the cost of detecting a complete object.
+        In the normal case we'll get all the fragments in order so this will roll along nicely.
+        Otherwise we may have to do a lot of work on a single fragment, but that's still better
+        than doing it every time for every fragment.
+    */
+    uint32_t m_cached_idx;
+
+    /** Array operator for fragments in the table (1-based).
+        This is a bit tricky. The earliest fragment is special and so is @b not stored in this table.
+        To make that easier to deal with, this array is one-based so the containing object can simply
+        pass the index on if it's not 0 (earliest). From an external point of view the array of fragments
+        is zero-based.
+     */
+    FragmentDescriptor &operator[](int idx);
+    /// Calculate the allocation size needed for a maximum array index of @a n.
+    static size_t calc_size(uint32_t n);
+  };
+
   HTTPCacheAlt();
+
   void copy(HTTPCacheAlt *to_copy);
-  void copy_frag_offsets_from(HTTPCacheAlt *src);
   void destroy();
 
   uint32_t m_magic;
 
-  // Writeable is set to true is we reside
-  //  in a buffer owned by this structure.
-  // INVARIANT: if own the buffer this HttpCacheAlt
-  //   we also own the buffers for the request &
-  //   response headers
-  int32_t m_writeable;
+  union {
+    uint32_t m_flags;
+    struct {
+      /** Do we own our own buffer?
+          @c true if the buffer containing this data is owned by this object.
+          INVARIANT: if we own this buffer then we also own the buffers for
+          @a m_request_hdr and @a m_response_hdr.
+      */
+      uint32_t writeable_p : 1;
+      /// Was this alternate originally stored as a partial object?
+      uint32_t composite_p : 1;
+      /// Did the origin tell us the actual length of the object?
+      uint32_t content_length_p : 1;
+      /// Are all fragments in cache?
+      uint32_t complete_p : 1;
+      /// Is the fragment table independently allocated?
+      uint32_t table_allocated_p : 1;
+      // Note - !composite_p => complete_p
+      //      - complete_p => content_length_p
+    } m_flag;
+  };
+
   int32_t m_unmarshal_len;
 
   int32_t m_id;
   int32_t m_rid;
 
-  int32_t m_object_key[4];
-  int32_t m_object_size[2];
+  /// # of fragments in the alternate, including the earliest fragment.
+  /// This can be zero for a resident alternate.
+  /// @internal In practice this is the high water mark for cached fragments.
+  /// Contrast with the @a m_cached_idx in the fragment table - that marks the
+  /// high-water mark of contiguously cached fragments.
+  uint32_t m_frag_count;
+
+  /** The target size for fragments in this alternate.
+      This is @b mandatory if the object is being partially cached.
+      During read it should be used as a guideline but not considered definitive.
+  */
+  uint32_t m_fixed_fragment_size;
 
   HTTPHdr m_request_hdr;
   HTTPHdr m_response_hdr;
@@ -1351,21 +1679,23 @@ struct HTTPCacheAlt {
   time_t m_request_sent_time;
   time_t m_response_received_time;
 
-  /// # of fragment offsets in this alternate.
-  /// @note This is one less than the number of fragments.
-  int m_frag_offset_count;
-  /// Type of offset for a fragment.
-  typedef uint64_t FragOffset;
-  /// Table of fragment offsets.
-  /// @note The offsets are forward looking so that frag[0] is the
-  /// first byte past the end of fragment 0 which is also the first
-  /// byte of fragment 1. For this reason there is no fragment offset
-  /// for the last fragment.
-  FragOffset *m_frag_offsets;
-  /// # of fragment offsets built in to object.
-  static int const N_INTEGRAL_FRAG_OFFSETS = 4;
-  /// Integral fragment offset table.
-  FragOffset m_integral_frag_offsets[N_INTEGRAL_FRAG_OFFSETS];
+  /** Special case the first (earliest, non-resident) fragment.
+      This holds the key for the earliest fragment and the object size
+      by overloading the offset in this specific instance.
+  */
+  FragmentDescriptor m_earliest;
+
+  /** Descriptors for the rest of the fragments.
+      Because the earliest fragment is stored separately, index 0 in this array is really the next fragment after the
+      earliest fragment. We should have the invariant
+      ( @a m_fragments != 0) == ( @a m_frag_count > 1 )
+
+      @internal I thought of using @c std::vector here, but then we end up with either
+      doing 2 allocations (one for the @c std::vector and another for its contents) or
+      writing the @c std::vector container to disk (because this struct is directly
+      serialized). Instead we do our own memory management, which doesn't make me happy either.
+  */
+  FragmentDescriptorTable *m_fragments;
 
   // With clustering, our alt may be in cluster
   //  incoming channel buffer, when we are
@@ -1380,7 +1710,8 @@ struct HTTPCacheAlt {
 class HTTPInfo
 {
 public:
-  typedef HTTPCacheAlt::FragOffset FragOffset; ///< Import type.
+  typedef HTTPCacheAlt::FragmentDescriptor FragmentDescriptor;           ///< Import type.
+  typedef HTTPCacheAlt::FragmentDescriptorTable FragmentDescriptorTable; ///< Import type.
 
   HTTPCacheAlt *m_alt;
 
@@ -1408,7 +1739,6 @@ public:
   {
     m_alt = info->m_alt;
   }
-  void copy_frag_offsets_from(HTTPInfo *src);
   HTTPInfo &operator=(const HTTPInfo &m);
 
   inkcoreapi int marshal_length();
@@ -1439,9 +1769,9 @@ public:
     m_alt->m_rid = id;
   }
 
-  INK_MD5 object_key_get();
-  void object_key_get(INK_MD5 *);
-  bool compare_object_key(const INK_MD5 *);
+  CryptoHash const &object_key_get();
+  void object_key_get(CryptoHash *);
+  bool compare_object_key(const CryptoHash *);
   int64_t object_size_get();
 
   void
@@ -1483,7 +1813,7 @@ public:
     return m_alt->m_response_received_time;
   }
 
-  void object_key_set(INK_MD5 &md5);
+  void object_key_set(CryptoHash const &md5);
   void object_size_set(int64_t size);
 
   void
@@ -1508,14 +1838,66 @@ public:
     m_alt->m_response_received_time = t;
   }
 
+  bool
+  is_composite() const
+  {
+    return m_alt->m_flag.composite_p;
+  }
+  bool
+  is_complete() const
+  {
+    return m_alt->m_flag.complete_p;
+  }
+  bool
+  is_writeable() const
+  {
+    return m_alt->m_flag.writeable_p;
+  }
+
+  /** Compute the convex hull of uncached ranges.
+
+      If the resulting range has a minimum that is less than @a initial @b and the earliest fragment
+      is not cached, then the minimum will be changed to zero. Put another way, the initial uncached
+      segment must be at least @a initial bytes long.
+
+      @return An invalid range if all of the request is available in cache.
+  */
+  HTTPRangeSpec::Range get_uncached_hull(HTTPRangeSpec const &req ///< [in] UA request with content length applied
+                                         ,
+                                         int64_t initial ///< Minimum size for uncached initial data
+                                         );
+
   /// Get the fragment table.
-  FragOffset *get_frag_table();
-  /// Get the # of fragment offsets
-  /// @note This is the size of the fragment offset table, and one less
-  /// than the actual # of fragments.
-  int get_frag_offset_count();
-  /// Add an @a offset to the end of the fragment offset table.
-  void push_frag_offset(FragOffset offset);
+  /// @note There is a fragment table only for multi-fragment alternates @b and
+  /// the indexing starts with the second (non-earliest) fragment.
+  /// @deprecated - use specialized methods.
+  FragmentDescriptorTable *get_frag_table();
+
+  /// Force a descriptor at index @a idx.
+  FragmentDescriptor *force_frag_at(unsigned int idx);
+
+  /// Get the fragment index for @a offset.
+  int get_frag_index_of(int64_t offset);
+  /// Get the fragment key for an @a offset.
+  /// @note Forces fragment.
+  CryptoHash const &get_frag_key_of(int64_t offset);
+  /// Get the fragment key of the @a idx fragment.
+  /// @note Forces fragment.
+  CryptoHash const &get_frag_key(unsigned int idx);
+  /// Get the starting offset of a fragment.
+  int64_t get_frag_offset(unsigned int idx);
+
+  /// Get the number of fragments.
+  /// 0 means resident alternate, 1 means single fragment, > 1 means multi-fragment.
+  int get_frag_count() const;
+  /// Get the target fragment size.
+  uint32_t get_frag_fixed_size() const;
+  /// Mark a fragment at index @a idx as written to cache.
+  void mark_frag_write(unsigned int idx);
+  /// Check if a fragment is cached.
+  bool is_frag_cached(unsigned int idx) const;
+  /// Get the range of bytes for the fragments from @a low to @a high.
+  HTTPRangeSpec::Range get_range_for_frags(int low, int high);
 
   // Sanity check functions
   static bool check_marshalled(char *buf, int len);
@@ -1528,7 +1910,7 @@ inline void
 HTTPInfo::destroy()
 {
   if (m_alt) {
-    if (m_alt->m_writeable) {
+    if (m_alt->m_flag.writeable_p) {
       m_alt->destroy();
     } else if (m_alt->m_ext_buffer) {
       if (m_alt->m_ext_buffer->refcount_dec() == 0) {
@@ -1545,77 +1927,307 @@ inline HTTPInfo &HTTPInfo::operator=(const HTTPInfo &m)
   return *this;
 }
 
-inline INK_MD5
+inline CryptoHash const &
 HTTPInfo::object_key_get()
 {
-  INK_MD5 val;
-  int32_t *pi = reinterpret_cast<int32_t *>(&val);
-
-  pi[0] = m_alt->m_object_key[0];
-  pi[1] = m_alt->m_object_key[1];
-  pi[2] = m_alt->m_object_key[2];
-  pi[3] = m_alt->m_object_key[3];
-
-  return val;
+  return m_alt->m_earliest.m_key;
 }
 
 inline void
-HTTPInfo::object_key_get(INK_MD5 *md5)
+HTTPInfo::object_key_get(CryptoHash *key)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(md5);
-  pi[0] = m_alt->m_object_key[0];
-  pi[1] = m_alt->m_object_key[1];
-  pi[2] = m_alt->m_object_key[2];
-  pi[3] = m_alt->m_object_key[3];
+  memcpy(key, &(m_alt->m_earliest.m_key), sizeof(*key));
 }
 
 inline bool
-HTTPInfo::compare_object_key(const INK_MD5 *md5)
+HTTPInfo::compare_object_key(const CryptoHash *key)
 {
-  int32_t const *pi = reinterpret_cast<int32_t const *>(md5);
-  return ((m_alt->m_object_key[0] == pi[0]) && (m_alt->m_object_key[1] == pi[1]) && (m_alt->m_object_key[2] == pi[2]) &&
-          (m_alt->m_object_key[3] == pi[3]));
+  return *key == m_alt->m_earliest.m_key;
 }
 
 inline int64_t
 HTTPInfo::object_size_get()
 {
-  int64_t val;
-  int32_t *pi = reinterpret_cast<int32_t *>(&val);
-
-  pi[0] = m_alt->m_object_size[0];
-  pi[1] = m_alt->m_object_size[1];
-  return val;
+  return m_alt->m_earliest.m_offset;
 }
 
 inline void
-HTTPInfo::object_key_set(INK_MD5 &md5)
+HTTPInfo::object_key_set(CryptoHash const &md5)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(&md5);
-  m_alt->m_object_key[0] = pi[0];
-  m_alt->m_object_key[1] = pi[1];
-  m_alt->m_object_key[2] = pi[2];
-  m_alt->m_object_key[3] = pi[3];
+  m_alt->m_earliest.m_key = md5;
 }
 
 inline void
 HTTPInfo::object_size_set(int64_t size)
 {
-  int32_t *pi = reinterpret_cast<int32_t *>(&size);
-  m_alt->m_object_size[0] = pi[0];
-  m_alt->m_object_size[1] = pi[1];
+  m_alt->m_earliest.m_offset = size;
+  m_alt->m_flag.content_length_p = true;
+  // Invariant - if a fragment is cached, all of that fragment is cached.
+  // Therefore if the last byte is in the initial cached fragments all of the data is cached.
+  if (!m_alt->m_flag.complete_p) {
+    int64_t mco = 0; // maximum cached offset + 1
+    if (m_alt->m_fragments) {
+      if (m_alt->m_fragments->m_cached_idx >= 0)
+        mco = this->get_frag_offset(m_alt->m_fragments->m_cached_idx) + this->get_frag_fixed_size();
+    } else if (m_alt->m_earliest.m_flag.cached_p) {
+      mco = this->get_frag_fixed_size();
+    }
+    if (mco > size)
+      m_alt->m_flag.complete_p = true;
+  }
 }
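
A numeric illustration of the completion check just added (the fragment layout is assumed, not taken from
this patch): with a 1 MiB fixed fragment size, regular offsets, and a contiguously cached prefix through
fragment 4 (m_cached_idx == 4):

    // mco = offset(frag 4) + 1 MiB = 4 MiB + 1 MiB = 5 MiB
    // A reported content length of 4.5 MiB satisfies mco > size, so complete_p is set;
    // a reported length of 6 MiB leaves the alternate incomplete.
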
 
-inline HTTPInfo::FragOffset *
+inline HTTPInfo::FragmentDescriptorTable *
 HTTPInfo::get_frag_table()
 {
-  return m_alt ? m_alt->m_frag_offsets : 0;
+  return m_alt ? m_alt->m_fragments : 0;
 }
 
 inline int
-HTTPInfo::get_frag_offset_count()
+HTTPInfo::get_frag_count() const
+{
+  return m_alt ? m_alt->m_frag_count : 0;
+}
+
+inline uint32_t
+HTTPInfo::get_frag_fixed_size() const
+{
+  return m_alt ? m_alt->m_fixed_fragment_size : 0;
+}
+
+inline CryptoHash const &
+HTTPInfo::get_frag_key_of(int64_t offset)
+{
+  return this->get_frag_key(this->get_frag_index_of(offset));
+}
+
+inline CryptoHash const &
+HTTPInfo::get_frag_key(unsigned int idx)
+{
+  return 0 == idx ? m_alt->m_earliest.m_key : this->force_frag_at(idx)->m_key;
+}
+
+inline int64_t
+HTTPInfo::get_frag_offset(unsigned int idx)
+{
+  return 0 == idx ? 0 : (*m_alt->m_fragments)[idx].m_offset;
+}
+
+inline bool
+HTTPInfo::is_frag_cached(unsigned int idx) const
+{
+  return m_alt && ((0 == idx && m_alt->m_earliest.m_flag.cached_p) ||
+                   (m_alt->m_fragments && idx < m_alt->m_fragments->m_n && (*m_alt->m_fragments)[idx].m_flag.cached_p));
+}
+
+inline HTTPRangeSpec::HTTPRangeSpec() : _state(EMPTY)
+{
+}
+
+inline void
+HTTPRangeSpec::clear()
+{
+  _state = EMPTY;
+  RangeBox().swap(_ranges); // force memory drop.
+}
+
+inline bool
+HTTPRangeSpec::isSingle() const
+{
+  return SINGLE == _state;
+}
+
+inline bool
+HTTPRangeSpec::isMulti() const
+{
+  return MULTI == _state;
+}
+
+inline bool
+HTTPRangeSpec::isEmpty() const
+{
+  return EMPTY == _state;
+}
+
+inline bool
+HTTPRangeSpec::isUnsatisfied() const
+{
+  return UNSATISFIABLE == _state;
+}
+
+inline size_t
+HTTPRangeSpec::count() const
+{
+  return SINGLE == _state ? 1 : _ranges.size();
+}
+
+inline bool
+HTTPRangeSpec::hasRanges() const
+{
+  return SINGLE == _state || MULTI == _state;
+}
+
+inline bool
+HTTPRangeSpec::isValid() const
+{
+  return SINGLE == _state || MULTI == _state || EMPTY == _state;
+}
+
+inline HTTPRangeSpec::Range &
+HTTPRangeSpec::Range::invalidate()
+{
+  _min = UINT64_MAX;
+  _max = 1;
+  return *this;
+}
+
+inline bool
+HTTPRangeSpec::Range::isSuffix() const
+{
+  return 0 == _max && _min > 0;
+}
+
+inline bool
+HTTPRangeSpec::Range::isValid() const
+{
+  return _min <= _max || this->isSuffix();
+}
+
+inline uint64_t
+HTTPRangeSpec::Range::size() const
+{
+  return 1 + (_max - _min);
+}
+
+inline uint64_t
+HTTPRangeSpec::size() const
+{
+  uint64_t size = 0;
+  if (this->isSingle())
+    size = _single.size();
+  else if (this->isMulti()) {
+    for (RangeBox::const_iterator spot = _ranges.begin(), limit = _ranges.end(); spot != limit; ++spot)
+      size += spot->size();
+  }
+  return size;
+}
+
+inline bool
+HTTPRangeSpec::Range::apply(uint64_t len)
+{
+  ink_assert(len > 0);
+  bool zret = true; // is this range satisfiable for @a len?
+  if (this->isSuffix()) {
+    _max = len - 1;
+    _min = _min > len ? 0 : len - _min;
+  } else if (_min < len) {
+    _max = MIN(_max, len - 1);
+  } else {
+    this->invalidate();
+    zret = false;
+  }
+  return zret;
+}
+
+inline HTTPRangeSpec &
+HTTPRangeSpec::add(uint64_t low, uint64_t high)
+{
+  return this->add(Range(low, high));
+}
+
+inline HTTPRangeSpec::Range &HTTPRangeSpec::operator[](int n)
+{
+  return SINGLE == _state ? _single : _ranges[n];
+}
+
+inline HTTPRangeSpec::Range const &HTTPRangeSpec::operator[](int n) const
+{
+  return SINGLE == _state ? _single : _ranges[n];
+}
+
+inline HTTPRangeSpec::iterator
+HTTPRangeSpec::begin()
+{
+  switch (_state) {
+  case SINGLE:
+    return &_single;
+  case MULTI:
+    return &(*(_ranges.begin()));
+  default:
+    return NULL;
+  }
+}
+
+inline HTTPRangeSpec::iterator
+HTTPRangeSpec::end()
+{
+  switch (_state) {
+  case SINGLE:
+    return (&_single) + 1;
+  case MULTI:
+    return &(*(_ranges.end()));
+  default:
+    return NULL;
+  }
+}
+
+inline HTTPRangeSpec::const_iterator
+HTTPRangeSpec::begin() const
+{
+  return const_cast<self *>(this)->begin();
+}
+
+inline HTTPRangeSpec::const_iterator
+HTTPRangeSpec::end() const
+{
+  return const_cast<self *>(this)->end();
+}
+
+inline HTTPRangeSpec::Range
+HTTPRangeSpec::getConvexHull() const
+{
+  Range zret;
+  // Compute the convex hull of the original in fragment indices.
+  for (const_iterator spot = this->begin(), limit = this->end(); spot != limit; ++spot) {
+    if (spot->_min < zret._min)
+      zret._min = spot->_min;
+    if (spot->_max > zret._max)
+      zret._max = spot->_max;
+  }
+  return zret;
+}
+
+inline HTTPCacheAlt::FragmentDescriptor &HTTPCacheAlt::FragmentDescriptorTable::operator[](int idx)
+{
+  ink_assert(idx > 0);
+  return *(reinterpret_cast<FragmentDescriptor *>(reinterpret_cast<char *>(this + 1) + sizeof(FragmentDescriptor) * (idx - 1)));
+}
+
+inline size_t
+HTTPCacheAlt::FragmentDescriptorTable::calc_size(uint32_t n)
+{
+  return n < 1 ? 0 : sizeof(FragmentDescriptorTable) + n * sizeof(FragmentDescriptor);
+}
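
The two helpers above imply a single flat allocation: the header fields followed immediately by the
descriptors, with operator[] starting at index 1. A hypothetical allocation sketch (error handling
omitted; the ats_malloc / memset usage is an assumption, mirroring how the table is sized above):

    uint32_t n = 8; // room for fragments 1..8 (earliest is stored separately)
    size_t bytes = HTTPCacheAlt::FragmentDescriptorTable::calc_size(n);
    HTTPCacheAlt::FragmentDescriptorTable *table =
      static_cast<HTTPCacheAlt::FragmentDescriptorTable *>(ats_malloc(bytes));
    memset(table, 0, bytes);
    table->m_n = n;
    (*table)[1].m_offset = 1 << 20; // first descriptor lives immediately after the header
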
+
+#if 0
+inline
+HTTPCacheAlt::FragmentAccessor::FragmentAccessor(HTTPCacheAlt* alt)
+             : _alt(alt), _table(alt->m_fragments)
+{
+}
+
+inline HTTPCacheAlt::FragmentDescriptor&
+HTTPCacheAlt::FragmentAccessor::operator [] (int idx)
+{
+  ink_assert(idx >= 0);
+  return idx == 0 ? _alt->m_earliest : (*_table)[idx];
+}
+
+inline uint32_t
+HTTPCacheAlt::FragmentAccessor::get_initial_cached_index() const
 {
-  return m_alt ? m_alt->m_frag_offset_count : 0;
+  return _table ? _table->m_cached_idx : 0;
 }
+#endif
 
 #endif /* __HTTP_H__ */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpCacheSM.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpCacheSM.cc b/proxy/http/HttpCacheSM.cc
index c259b38..2ba4aee 100644
--- a/proxy/http/HttpCacheSM.cc
+++ b/proxy/http/HttpCacheSM.cc
@@ -267,6 +267,37 @@ HttpCacheSM::open_read(const HttpCacheKey *key, URL *url, HTTPHdr *hdr, CacheLoo
   }
 }
 
+int
+HttpCacheSM::state_cache_open_partial_read(int evid, void *data)
+{
+  if (!open_read_cb)
+    return this->state_cache_open_read(evid, data);
+  Debug("amc", "[HttpCacheSM::state_cache_open_partial_read] second round");
+  return VC_EVENT_DONE;
+}
+
+Action *
+HttpCacheSM::open_partial_read(HTTPHdr *client_request_hdr)
+{
+  // Simple because this requires an active write VC so we know the object is there (no retries).
+  ink_assert(NULL != cache_write_vc);
+
+  // If this is a partial fill there will be a cache read VC. Resetting it to be used is challenging
+  // because it requires digging in to the internals of the VC or expanding its interface. At present
+  // it's better to just close it and re-open one that we know is valid with regard to the write VC.
+  this->close_read();
+
+  SET_HANDLER(&HttpCacheSM::state_cache_open_partial_read);
+  open_read_cb = false;
+
+  Action *action_handle = cacheProcessor.open_read(this, cache_write_vc, client_request_hdr);
+
+  if (action_handle != ACTION_RESULT_DONE)
+    pending_action = action_handle;
+
+  return open_read_cb ? ACTION_RESULT_DONE : &captive_action;
+}
+
 Action *
 HttpCacheSM::open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache,
                         bool retry, bool allow_multiple)


[8/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
TS-974: Partial Object Caching.


Project: http://git-wip-us.apache.org/repos/asf/trafficserver/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafficserver/commit/1c06db83
Tree: http://git-wip-us.apache.org/repos/asf/trafficserver/tree/1c06db83
Diff: http://git-wip-us.apache.org/repos/asf/trafficserver/diff/1c06db83

Branch: refs/heads/poc-6-0-x
Commit: 1c06db83123c336d462083002eda9ffcd4852730
Parents: a0b75bc
Author: Alan M. Carroll <so...@yahoo-inc.com>
Authored: Wed Mar 25 20:13:14 2015 -0500
Committer: Alan M. Carroll <so...@yahoo-inc.com>
Committed: Sun Jul 19 08:34:30 2015 -0500

----------------------------------------------------------------------
 cmd/traffic_cop/traffic_cop.cc              |  29 +-
 cmd/traffic_crashlog/procinfo.cc            |  12 +-
 cmd/traffic_crashlog/traffic_crashlog.cc    |   6 +-
 cmd/traffic_crashlog/traffic_crashlog.h     |   3 +-
 cmd/traffic_ctl/alarm.cc                    |   3 +-
 cmd/traffic_ctl/metric.cc                   |   6 +-
 cmd/traffic_ctl/traffic_ctl.h               |   3 +-
 cmd/traffic_line/traffic_line.cc            |  29 +-
 cmd/traffic_manager/AddConfigFilesHere.cc   |   1 -
 cmd/traffic_manager/StatProcessor.cc        |  11 -
 cmd/traffic_manager/StatProcessor.h         |   1 -
 cmd/traffic_manager/StatType.cc             |  36 -
 cmd/traffic_manager/StatType.h              |   2 -
 cmd/traffic_manager/StatXML.cc              |   1 -
 cmd/traffic_manager/StatXML.h               |   1 -
 cmd/traffic_manager/WebOverview.cc          |  13 +-
 cmd/traffic_top/stats.h                     |  12 +-
 cmd/traffic_top/traffic_top.cc              |  26 +-
 cmd/traffic_via/traffic_via.cc              |  23 +-
 doc/arch/cache/cache-data-structures.en.rst |   3 +-
 iocore/cache/Cache.cc                       |  76 ++-
 iocore/cache/CacheDir.cc                    | 132 ++--
 iocore/cache/CacheHttp.cc                   | 325 ++++++++-
 iocore/cache/CacheRead.cc                   | 713 +++++++++----------
 iocore/cache/CacheTest.cc                   |   4 +-
 iocore/cache/CacheVol.cc                    |   5 +-
 iocore/cache/CacheWrite.cc                  | 417 +++++++++---
 iocore/cache/I_Cache.h                      |  72 ++
 iocore/cache/I_CacheDefs.h                  |   4 +-
 iocore/cache/P_CacheBC.h                    |  30 +-
 iocore/cache/P_CacheDir.h                   |  97 ++-
 iocore/cache/P_CacheHttp.h                  | 287 +++++++-
 iocore/cache/P_CacheInternal.h              | 105 ++-
 iocore/cache/P_CacheVol.h                   |  10 +-
 iocore/cluster/ClusterCache.cc              |   2 +-
 iocore/cluster/ClusterVConnection.cc        |   1 +
 iocore/cluster/P_Cluster.h                  |   3 +-
 iocore/cluster/P_ClusterCache.h             |  22 +
 lib/ts/CryptoHash.h                         |  47 +-
 lib/ts/InkErrno.h                           |   2 +
 lib/ts/ParseRules.cc                        |  18 +
 lib/ts/ParseRules.h                         |  20 +
 lib/ts/TsBuffer.h                           |  97 ++-
 lib/ts/ink_code.cc                          |  29 +
 mgmt/Alarms.cc                              |  13 +-
 mgmt/Alarms.h                               |   1 -
 mgmt/BaseManager.cc                         |   5 -
 mgmt/BaseManager.h                          |   4 -
 mgmt/FileManager.cc                         |  40 +-
 mgmt/LocalManager.cc                        |  82 ++-
 mgmt/MultiFile.cc                           |   1 -
 mgmt/ProcessManager.cc                      |  22 +-
 mgmt/ProxyConfig.h                          |   6 +-
 mgmt/RecordsConfig.h                        |   4 +-
 mgmt/RecordsConfigUtils.cc                  |  21 +-
 mgmt/Rollback.cc                            |  45 +-
 mgmt/Rollback.h                             |   3 +-
 mgmt/WebMgmtUtils.cc                        |  10 +-
 mgmt/api/APITestCliRemote.cc                |  64 +-
 mgmt/api/CfgContextImpl.cc                  |  19 +-
 mgmt/api/CfgContextManager.cc               |   5 -
 mgmt/api/CfgContextManager.h                |   2 -
 mgmt/api/CfgContextUtils.cc                 |  19 +-
 mgmt/api/CfgContextUtils.h                  |   2 -
 mgmt/api/CoreAPI.cc                         |   7 +-
 mgmt/api/CoreAPI.h                          |   4 +-
 mgmt/api/CoreAPIRemote.cc                   |   3 +-
 mgmt/api/CoreAPIShared.cc                   |  20 +-
 mgmt/api/EventCallback.cc                   |  12 +-
 mgmt/api/EventControlMain.cc                |  27 +-
 mgmt/api/GenericParser.cc                   |  25 +-
 mgmt/api/GenericParser.h                    |   1 -
 mgmt/api/INKMgmtAPI.cc                      |   9 +-
 mgmt/api/NetworkMessage.cc                  |   9 +-
 mgmt/api/NetworkMessage.h                   |   9 +-
 mgmt/api/NetworkUtilsLocal.cc               |   6 +-
 mgmt/api/NetworkUtilsRemote.cc              |  15 +-
 mgmt/api/NetworkUtilsRemote.h               |   3 +-
 mgmt/api/TSControlMain.cc                   |  24 +-
 mgmt/cluster/ClusterCom.cc                  | 166 ++---
 mgmt/cluster/ClusterCom.h                   |  17 +-
 mgmt/cluster/VMap.cc                        |  58 +-
 mgmt/cluster/VMap.h                         |   6 +-
 mgmt/utils/MgmtHashTable.h                  |   1 -
 mgmt/utils/MgmtMarshall.h                   |  12 +-
 mgmt/utils/MgmtUtils.cc                     |   3 +-
 mgmt/utils/test_marshall.cc                 |   6 +-
 proxy/ControlMatcher.cc                     |   9 +-
 proxy/ControlMatcher.h                      |   6 +-
 proxy/CoreUtils.cc                          |  13 +-
 proxy/Crash.cc                              |  12 +-
 proxy/DynamicStats.h                        |  13 +-
 proxy/EventName.cc                          |   4 -
 proxy/FetchSM.cc                            |  12 +-
 proxy/ICP.cc                                |   2 -
 proxy/ICPConfig.cc                          |   8 +-
 proxy/ICPProcessor.cc                       |   1 -
 proxy/ICPProcessor.h                        |   1 -
 proxy/ICPlog.h                              |   1 -
 proxy/IPAllow.cc                            |   7 +-
 proxy/InkAPITest.cc                         | 307 ++++-----
 proxy/InkAPITestTool.cc                     |  20 +-
 proxy/InkIOCoreAPI.cc                       |   7 -
 proxy/Main.h                                |   1 -
 proxy/ParentSelection.cc                    |  49 +-
 proxy/Plugin.cc                             |   7 +-
 proxy/PluginVC.cc                           |   9 +-
 proxy/Prefetch.cc                           |  61 +-
 proxy/Prefetch.h                            |   1 -
 proxy/ProtoSM.h                             |   1 -
 proxy/ProtocolProbeSessionAccept.cc         |  12 +-
 proxy/ProtocolProbeSessionAccept.h          |   3 +-
 proxy/ProxyClientSession.h                  |   6 +-
 proxy/ReverseProxy.h                        |   3 +-
 proxy/Show.h                                |   1 -
 proxy/StatSystem.cc                         |   7 +-
 proxy/TestClusterHash.cc                    |   1 -
 proxy/TestPreProc.cc                        |   2 -
 proxy/TestProxy.cc                          |   1 -
 proxy/TestSimpleProxy.cc                    |   1 -
 proxy/TimeTrace.h                           |   1 -
 proxy/Transform.h                           |   1 -
 proxy/UDPAPIClientTest.cc                   |   2 -
 proxy/api/ts/InkAPIPrivateIOCore.h          |   1 -
 proxy/api/ts/remap.h                        |  40 +-
 proxy/api/ts/ts.h                           |  76 ++-
 proxy/hdrs/HTTP.cc                          | 798 ++++++++++++++++++----
 proxy/hdrs/HTTP.h                           | 826 ++++++++++++++++++++---
 proxy/http/HttpCacheSM.cc                   |  31 +
 proxy/http/HttpCacheSM.h                    |   9 +-
 proxy/http/HttpDebugNames.cc                |   2 +
 proxy/http/HttpSM.cc                        | 280 +++++---
 proxy/http/HttpSM.h                         |   4 +-
 proxy/http/HttpTransact.cc                  | 209 ++++--
 proxy/http/HttpTransact.h                   |  93 ++-
 proxy/http/HttpTransactHeaders.cc           |  13 +
 proxy/http/HttpTransactHeaders.h            |   1 +
 proxy/http/HttpTunnel.cc                    |  14 +-
 proxy/http2/HPACK.cc                        |  10 +-
 proxy/http2/HPACK.h                         |   9 +-
 proxy/http2/HTTP2.cc                        |  25 +-
 proxy/http2/HTTP2.h                         |  35 +-
 proxy/http2/Http2ClientSession.cc           |  18 +-
 proxy/http2/Http2ClientSession.h            |   4 +-
 proxy/http2/Http2ConnectionState.h          |  13 +-
 proxy/http2/Http2SessionAccept.cc           |  11 +-
 proxy/http2/Http2SessionAccept.h            |  12 +-
 proxy/logging/Log.cc                        |  29 +-
 proxy/logging/Log.h                         |   4 +-
 proxy/logging/LogAccess.cc                  |  11 +-
 proxy/logging/LogAccessHttp.cc              |  18 +-
 proxy/logging/LogAccessHttp.h               |   1 -
 proxy/logging/LogAccessICP.h                |   1 -
 proxy/logging/LogAccessTest.h               |   1 -
 proxy/logging/LogBuffer.cc                  |   3 -
 proxy/logging/LogBuffer.h                   |  10 +-
 proxy/logging/LogCollationHostSM.cc         |   3 +-
 proxy/logging/LogConfig.h                   |   5 -
 proxy/logging/LogField.cc                   |   5 -
 proxy/logging/LogField.h                    |   3 -
 proxy/logging/LogFieldAliasMap.h            |   6 +-
 proxy/logging/LogFile.cc                    |   4 +-
 proxy/logging/LogFile.h                     |   1 -
 proxy/logging/LogFilter.cc                  |  16 +-
 proxy/logging/LogFilter.h                   |   3 -
 proxy/logging/LogFormat.cc                  |  17 +-
 proxy/logging/LogFormat.h                   |   3 +-
 proxy/logging/LogObject.cc                  |  46 +-
 proxy/logging/LogObject.h                   |   4 +-
 proxy/logging/LogSock.h                     |   1 -
 proxy/logging/LogStandalone.cc              |   2 -
 proxy/logging/LogUtils.cc                   |   6 +-
 proxy/logstats.cc                           |  45 +-
 173 files changed, 4738 insertions(+), 2187 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_cop/traffic_cop.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_cop/traffic_cop.cc b/cmd/traffic_cop/traffic_cop.cc
index e3476fe..55778ff 100644
--- a/cmd/traffic_cop/traffic_cop.cc
+++ b/cmd/traffic_cop/traffic_cop.cc
@@ -202,7 +202,6 @@ cop_log(int priority, const char *format, ...)
   va_end(args);
 }
 
-
 void
 chown_file_to_admin_user(const char *file)
 {
@@ -412,7 +411,6 @@ safe_kill(const char *lockfile_name, const char *pname, bool group)
   cop_log_trace("Leaving safe_kill(%s, %s, %d)\n", lockfile_name, pname, group);
 }
 
-
 // ink_hrtime milliseconds()
 //
 // Returns the result of gettimeofday converted to
@@ -817,7 +815,6 @@ spawn_manager()
   cop_log_trace("Leaving spawn_manager()\n");
 }
 
-
 static int
 poll_read_or_write(int fd, int timeout, int inorout)
 {
@@ -917,7 +914,8 @@ open_socket(int port, const char *ip = NULL, char const *ip_to_bind = NULL)
         ((sockaddr_in6 *)result_to_bind->ai_addr)->sin6_port = htons(source_port);
       }
 
-      // also set REUSEADDR so that previous cop connections in the TIME_WAIT state
+      // also set REUSEADDR so that previous cop connections in the TIME_WAIT
+      // state
       // do not interfere
       if (safe_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, SOCKOPT_ON, sizeof(int)) < 0) {
         cop_log(COP_WARNING, "(test) unable to set REUSEADDR socket option [%d '%s']\n", errno, strerror(errno));
@@ -1176,7 +1174,6 @@ read_mgmt_cli_int(const char *variable, int *value)
   return 0;
 }
 
-
 static int
 test_rs_port()
 {
@@ -1196,7 +1193,6 @@ test_rs_port()
   return 0;
 }
 
-
 static int
 test_mgmt_cli_port()
 {
@@ -1218,7 +1214,6 @@ test_mgmt_cli_port()
   return ret;
 }
 
-
 static int
 test_http_port(int port, char *request, int timeout, char const *ip = NULL, char const *ip_to_bind = NULL)
 {
@@ -1301,7 +1296,8 @@ heartbeat_manager()
     err = test_rs_port();
 
   if (err < 0) {
-    // See heartbeat_server()'s comments for how we determine a server/manager failure.
+    // See heartbeat_server()'s comments for how we determine a server/manager
+    // failure.
     manager_failures += 1;
     cop_log(COP_WARNING, "manager heartbeat [variable] failed [%d]\n", manager_failures);
 
@@ -1401,7 +1397,6 @@ server_up()
   }
 }
 
-
 //         |  state  |  status  |  action
 // --------|---------|----------|---------------
 // manager |   up    |    ok    |  nothing
@@ -1416,7 +1411,6 @@ server_up()
 // manager |   up    |    ok    |  kill server
 // server  |   up    |    bad   |
 
-
 static void
 check_programs()
 {
@@ -1653,7 +1647,8 @@ check(void *arg)
     // the SIGALRM signal which we use to heartbeat the cop.
     millisleep(sleep_time * 1000);
 
-    // We do this after the first round of checks, since the first "check" will spawn traffic_manager
+    // We do this after the first round of checks, since the first "check" will
+    // spawn traffic_manager
     if (!mgmt_init) {
       ats_scoped_str runtimedir(config_read_runtime_dir());
       TSInit(runtimedir, static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS));
@@ -1673,7 +1668,6 @@ check(void *arg)
   return arg;
 }
 
-
 static void
 check_lockfile()
 {
@@ -1795,8 +1789,9 @@ init_config_file()
   if (stat(config_file, &info) < 0) {
     Layout::relative_to(config_file, sizeof(config_file), config_dir, "records.config");
     if (stat(config_file, &info) < 0) {
-      cop_log(COP_FATAL, "unable to locate \"%s/records.config\" or \"%s/records.config.shadow\"\n", (const char *)config_dir,
-              (const char *)config_dir);
+      cop_log(COP_FATAL, "unable to locate \"%s/records.config\" or "
+                         "\"%s/records.config.shadow\"\n",
+              (const char *)config_dir, (const char *)config_dir);
       exit(1);
     }
   }
@@ -1810,7 +1805,8 @@ init()
 
   cop_log_trace("Entering init()\n");
 
-  // Start up the records store and load the defaults so that we can locate our configuration.
+  // Start up the records store and load the defaults so that we can locate our
+  // configuration.
   RecConfigFileInit();
   RecordsConfigIterate(config_register_default, NULL);
 
@@ -1823,7 +1819,8 @@ init()
   runtime_dir = config_read_runtime_dir();
   if (stat(runtime_dir, &info) < 0) {
     cop_log(COP_FATAL, "unable to locate local state directory '%s'\n", runtime_dir);
-    cop_log(COP_FATAL, " please try setting correct root path in either env variable TS_ROOT \n");
+    cop_log(COP_FATAL, " please try setting correct root path in either env "
+                       "variable TS_ROOT \n");
     exit(1);
   }
 

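Editor's note: the open_socket() hunk above only re-wraps the comment explaining why traffic_cop sets SO_REUSEADDR, namely so a restarted cop can rebind a port whose previous connections are still in TIME_WAIT. A minimal standalone sketch of that setup follows; it is not the traffic_cop code, and the helper name is made up for illustration.

// Sketch: enable SO_REUSEADDR before bind() so a freshly restarted process
// can rebind a port whose old connections are still in TIME_WAIT.
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
#include <cstdint>
#include <cstring>

int
open_bound_socket(uint16_t port)
{
  int sock = socket(AF_INET, SOCK_STREAM, 0);
  if (sock < 0)
    return -1;

  int on = 1;
  // Without this, bind() can fail with EADDRINUSE right after a restart.
  if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) {
    close(sock);
    return -1;
  }

  sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family      = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  addr.sin_port        = htons(port);

  if (bind(sock, reinterpret_cast<sockaddr *>(&addr), sizeof(addr)) < 0) {
    close(sock);
    return -1;
  }
  return sock;
}
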
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_crashlog/procinfo.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_crashlog/procinfo.cc b/cmd/traffic_crashlog/procinfo.cc
index cb15b87..b8a9414 100644
--- a/cmd/traffic_crashlog/procinfo.cc
+++ b/cmd/traffic_crashlog/procinfo.cc
@@ -146,9 +146,12 @@ crashlog_write_backtrace(FILE *fp, const crashlog_target &)
   TSString trace = NULL;
   TSMgmtError mgmterr;
 
-  // NOTE: sometimes we can't get a backtrace because the ptrace attach will fail with
-  // EPERM. I've seen this happen when a debugger is attached, which makes sense, but it
-  // can also happen without a debugger. Possibly in that case, there is a race with the
+  // NOTE: sometimes we can't get a backtrace because the ptrace attach will
+  // fail with
+  // EPERM. I've seen this happen when a debugger is attached, which makes
+  // sense, but it
+  // can also happen without a debugger. Possibly in that case, there is a race
+  // with the
   // kernel locking the process information?
 
   if ((mgmterr = TSProxyBacktraceGet(0, &trace)) != TS_ERR_OKAY) {
@@ -177,7 +180,8 @@ crashlog_write_records(FILE *fp, const crashlog_target &)
     goto done;
   }
 
-  // If the RPC call failed, the list will be empty, so we won't print anything. Otherwise,
+  // If the RPC call failed, the list will be empty, so we won't print anything.
+  // Otherwise,
   // print all the results, freeing them as we go.
   for (TSRecordEle *rec_ele = (TSRecordEle *)TSListDequeue(list); rec_ele; rec_ele = (TSRecordEle *)TSListDequeue(list)) {
     if (!success) {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_crashlog/traffic_crashlog.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_crashlog/traffic_crashlog.cc b/cmd/traffic_crashlog/traffic_crashlog.cc
index 7c5599f..1460488 100644
--- a/cmd/traffic_crashlog/traffic_crashlog.cc
+++ b/cmd/traffic_crashlog/traffic_crashlog.cc
@@ -101,8 +101,10 @@ main(int /* argc ATS_UNUSED */, const char **argv)
     kill(getpid(), SIGSTOP);
   }
 
-  // XXX This is a hack. traffic_manager starts traffic_server with the euid of the admin user. We are still
-  // privileged, but won't be able to open files in /proc or ptrace the target. This really should be fixed
+  // XXX This is a hack. traffic_manager starts traffic_server with the euid of
+  // the admin user. We are still
+  // privileged, but won't be able to open files in /proc or ptrace the target.
+  // This really should be fixed
   // in traffic_manager.
   if (getuid() == 0) {
     ATS_UNUSED_RETURN(seteuid(0));

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_crashlog/traffic_crashlog.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_crashlog/traffic_crashlog.h b/cmd/traffic_crashlog/traffic_crashlog.h
index 0b066d1..6da4128 100644
--- a/cmd/traffic_crashlog/traffic_crashlog.h
+++ b/cmd/traffic_crashlog/traffic_crashlog.h
@@ -27,7 +27,8 @@
 #include "libts.h"
 #include "mgmtapi.h"
 
-// ucontext.h is deprecated on Darwin, and we really only need it on Linux, so only
+// ucontext.h is deprecated on Darwin, and we really only need it on Linux, so
+// only
 // include it if we are planning to use it.
 #if defined(__linux__)
 #include <ucontext.h>

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_ctl/alarm.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_ctl/alarm.cc b/cmd/traffic_ctl/alarm.cc
index c341ffa..88754e9 100644
--- a/cmd/traffic_ctl/alarm.cc
+++ b/cmd/traffic_ctl/alarm.cc
@@ -128,7 +128,8 @@ subcommand_alarm(unsigned argc, const char **argv)
     {alarm_clear, "clear", "Clear all current alarms"},
     {alarm_list, "list", "List all current alarms"},
 
-    // Note that we separate resolve one from resolve all for the same reasons that
+    // Note that we separate resolve one from resolve all for the same reasons
+    // that
     // we have "metric zero" and "metric clear".
     {alarm_resolve, "resolve", "Resolve the listed alarms"},
     /* XXX describe a specific alarm? */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_ctl/metric.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_ctl/metric.cc b/cmd/traffic_ctl/metric.cc
index 36a8b6e..9c8d124 100644
--- a/cmd/traffic_ctl/metric.cc
+++ b/cmd/traffic_ctl/metric.cc
@@ -132,8 +132,10 @@ subcommand_metric(unsigned argc, const char **argv)
     {metric_match, "match", "Get metrics matching a regular expression"},
     {CtrlUnimplementedCommand, "monitor", "Display the value of a metric over time"},
 
-    // We could allow clearing all the metrics in the "clear" subcommand, but that seems error-prone. It
-    // would be too easy to just expect a help message and accidentally nuke all the metrics.
+    // We could allow clearing all the metrics in the "clear" subcommand, but
+    // that seems error-prone. It
+    // would be too easy to just expect a help message and accidentally nuke all
+    // the metrics.
     {metric_zero, "zero", "Clear one or more metric values"},
   };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_ctl/traffic_ctl.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_ctl/traffic_ctl.h b/cmd/traffic_ctl/traffic_ctl.h
index b77ee40..b83c60e 100644
--- a/cmd/traffic_ctl/traffic_ctl.h
+++ b/cmd/traffic_ctl/traffic_ctl.h
@@ -193,7 +193,8 @@ struct CtrlCommandLine {
       this->args.push_back(argv[i]);
     }
 
-    // Always NULL-terminate to keep ink_args happy. Note that we adjust arg() accordingly.
+    // Always NULL-terminate to keep ink_args happy. Note that we adjust arg()
+    // accordingly.
     this->args.push_back(NULL);
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_line/traffic_line.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_line/traffic_line.cc b/cmd/traffic_line/traffic_line.cc
index 0c10839..743e69d 100644
--- a/cmd/traffic_line/traffic_line.cc
+++ b/cmd/traffic_line/traffic_line.cc
@@ -90,13 +90,16 @@ handleArgInvocation()
     TSRecordEleDestroy(rec_ele);
     return TSStatsReset(*ZeroCluster ? true : false, name);
   } else if (QueryDeadhosts == 1) {
-    fprintf(stderr, "Query Deadhosts is not implemented, it requires support for congestion control.\n");
-    fprintf(stderr, "For more details, examine the old code in cli/CLI.cc: QueryDeadhosts()\n");
+    fprintf(stderr, "Query Deadhosts is not implemented, it requires support "
+                    "for congestion control.\n");
+    fprintf(stderr, "For more details, examine the old code in cli/CLI.cc: "
+                    "QueryDeadhosts()\n");
     return TS_ERR_FAIL;
   } else if (*StorageCmdOffline) {
     return TSStorageDeviceCmdOffline(StorageCmdOffline);
   } else if (ShowAlarms == 1) {
-    // Show all active alarms, this was moved from the old traffic_shell implementation (show:alarms).
+    // Show all active alarms, this was moved from the old traffic_shell
+    // implementation (show:alarms).
     TSList events = TSListCreate();
 
     if (TS_ERR_OKAY != TSActiveEventGetMlt(events)) {
@@ -119,7 +122,8 @@ handleArgInvocation()
     TSListDestroy(events);
     return TS_ERR_OKAY;
   } else if (*ClearAlarms != '\0') {
-    // Clear (some) active alarms, this was moved from the old traffic_shell implementation (config:alarm)
+    // Clear (some) active alarms, this was moved from the old traffic_shell
+    // implementation (config:alarm)
     TSList events = TSListCreate();
     size_t len = strlen(ClearAlarms);
 
@@ -189,7 +193,9 @@ handleArgInvocation()
     return err;
   } else if (*ReadVar != '\0') { // Handle a value read
     if (*SetVar != '\0' || *VarValue != '\0') {
-      fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set values at the same time\n", program_name);
+      fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set "
+                      "values at the same time\n",
+              program_name);
       return TS_ERR_FAIL;
     } else {
       TSMgmtError err;
@@ -222,7 +228,9 @@ handleArgInvocation()
     }
   } else if (*MatchVar != '\0') { // Handle a value read
     if (*SetVar != '\0' || *VarValue != '\0') {
-      fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set values at the same time\n", program_name);
+      fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set "
+                      "values at the same time\n",
+              program_name);
       return TS_ERR_FAIL;
     } else {
       TSMgmtError err;
@@ -234,7 +242,8 @@ handleArgInvocation()
         ats_free(msg);
       }
 
-      // If the RPC call failed, the list will be empty, so we won't print anything. Otherwise,
+      // If the RPC call failed, the list will be empty, so we won't print
+      // anything. Otherwise,
       // print all the results, freeing them as we go.
       for (TSRecordEle *rec_ele = (TSRecordEle *)TSListDequeue(list); rec_ele; rec_ele = (TSRecordEle *)TSListDequeue(list)) {
         switch (rec_ele->rec_type) {
@@ -334,7 +343,8 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   ShowStatus = 0;
   ClearAlarms[0] = '\0';
 
-  /* Argument description table used to describe how to parse command line args, */
+  /* Argument description table used to describe how to parse command line args,
+   */
   /* see 'ink_args.h' for meanings of the various fields */
   ArgumentDescription argument_descriptions[] = {
     {"query_deadhosts", 'q', "Query congested sites", "F", &QueryDeadhosts, NULL, NULL},
@@ -367,7 +377,8 @@ main(int /* argc ATS_UNUSED */, const char **argv)
 
   // Connect to Local Manager and do it.
   if (TS_ERR_OKAY != TSInit(NULL, static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS))) {
-    fprintf(stderr, "error: could not connect to management port, make sure traffic_manager is running\n");
+    fprintf(stderr, "error: could not connect to management port, make sure "
+                    "traffic_manager is running\n");
     exit(1);
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/AddConfigFilesHere.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/AddConfigFilesHere.cc b/cmd/traffic_manager/AddConfigFilesHere.cc
index ea9066b..76c223b 100644
--- a/cmd/traffic_manager/AddConfigFilesHere.cc
+++ b/cmd/traffic_manager/AddConfigFilesHere.cc
@@ -42,7 +42,6 @@ testcall(char *foo, bool /* incVersion */)
   Debug("lm", "Received Callback that %s has changed\n", foo);
 }
 
-
 //
 // initializeRegistry()
 //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatProcessor.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatProcessor.cc b/cmd/traffic_manager/StatProcessor.cc
index 9202aca..0261689 100644
--- a/cmd/traffic_manager/StatProcessor.cc
+++ b/cmd/traffic_manager/StatProcessor.cc
@@ -64,7 +64,6 @@ xml_strcmp(const xmlchar *s1, const char *s2)
   return strcmp((const char *)s1, s2);
 }
 
-
 static void
 elementStart(void * /* userData ATS_UNUSED */, const xmlchar *name, const xmlchar **atts)
 {
@@ -138,7 +137,6 @@ elementStart(void * /* userData ATS_UNUSED */, const xmlchar *name, const xmlcha
   }
 }
 
-
 static void
 elementEnd(void * /* userData ATS_UNUSED */, const xmlchar * /* name ATS_UNUSED */)
 {
@@ -158,7 +156,6 @@ elementEnd(void * /* userData ATS_UNUSED */, const xmlchar * /* name ATS_UNUSED
   }
 }
 
-
 static void
 charDataHandler(void * /* userData ATS_UNUSED */, const xmlchar *name, int /* len ATS_UNUSED */)
 {
@@ -179,13 +176,11 @@ charDataHandler(void * /* userData ATS_UNUSED */, const xmlchar *name, int /* le
   }
 }
 
-
 StatProcessor::StatProcessor(FileManager *configFiles) : m_lmgmt(NULL), m_overviewGenerator(NULL)
 {
   rereadConfig(configFiles);
 }
 
-
 void
 StatProcessor::rereadConfig(FileManager *configFiles)
 {
@@ -256,19 +251,16 @@ StatProcessor::rereadConfig(FileManager *configFiles)
   xmlFreeParserCtxt(parser);
 #endif
 
-
   delete fileContent;
 
   Debug(MODULE_INIT, "\n\n---------- END OF PARSING & INITIALIZING ---------\n\n");
 }
 
-
 StatProcessor::~StatProcessor()
 {
   Debug(MODULE_INIT, "[StatProcessor] Destructing Statistics Processor\n");
 }
 
-
 void
 setTest()
 {
@@ -287,7 +279,6 @@ setTest()
   }
 }
 
-
 void
 verifyTest()
 {
@@ -319,7 +310,6 @@ verifyTest()
   }
 }
 
-
 /**
  * Updating the statistics NOW.
  **/
@@ -337,7 +327,6 @@ StatProcessor::processStat()
   return (result);
 }
 
-
 /**
  * ExpressionEval
  * --------------

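Editor's note: StatProcessor::rereadConfig() feeds the stats XML through a SAX-style parser wired to elementStart(), elementEnd() and charDataHandler() (expat or libxml2, depending on the build). For orientation only, here is a hypothetical expat version of that wiring; it is not the StatProcessor code and the handler names are invented.

// Sketch: SAX-style XML parsing with expat, mirroring the start/end/chardata
// callback shape used by StatProcessor.
#include <expat.h>
#include <cstdio>
#include <cstring>

static void XMLCALL
on_start(void *, const XML_Char *name, const XML_Char **)
{
  printf("start element: %s\n", name);
}

static void XMLCALL
on_end(void *, const XML_Char *name)
{
  printf("end element: %s\n", name);
}

static void XMLCALL
on_chardata(void *, const XML_Char *s, int len)
{
  printf("text: %.*s\n", len, s);
}

int
parse_stats_xml(const char *doc)
{
  XML_Parser parser = XML_ParserCreate(NULL);
  XML_SetElementHandler(parser, on_start, on_end);
  XML_SetCharacterDataHandler(parser, on_chardata);

  // The final argument (1) marks this as the last chunk of input.
  int ok = (XML_Parse(parser, doc, (int)strlen(doc), 1) == XML_STATUS_OK);
  XML_ParserFree(parser);
  return ok ? 0 : -1;
}
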
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatProcessor.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatProcessor.h b/cmd/traffic_manager/StatProcessor.h
index c6b3dd8..caa2eab 100644
--- a/cmd/traffic_manager/StatProcessor.h
+++ b/cmd/traffic_manager/StatProcessor.h
@@ -75,7 +75,6 @@ public:
   overviewPage *m_overviewGenerator;
 };
 
-
 /**
  * External expression evaluation API.
  *

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatType.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatType.cc b/cmd/traffic_manager/StatType.cc
index 11b1484..d500a62 100644
--- a/cmd/traffic_manager/StatType.cc
+++ b/cmd/traffic_manager/StatType.cc
@@ -53,7 +53,6 @@ StatExprToken::StatExprToken()
   memset(&m_token_value_delta, 0, sizeof(m_token_value_delta));
 }
 
-
 /**
  * StatExprToken::copy()
  * ---------------------
@@ -84,7 +83,6 @@ StatExprToken::copy(const StatExprToken &source)
   m_sum_var = source.m_sum_var;
 }
 
-
 /**
  * StatExprToken::assignTokenName()
  * --------------------------------
@@ -135,7 +133,6 @@ StatExprToken::assignTokenName(const char *name)
   }
 }
 
-
 /**
  * assignTokenType()
  * -----------------
@@ -164,7 +161,6 @@ StatExprToken::assignTokenType()
   return (m_token_type != RECD_NULL);
 }
 
-
 void
 StatExprToken::clean()
 {
@@ -172,7 +168,6 @@ StatExprToken::clean()
   delete m_token_value_delta;
 }
 
-
 /**
  * FOR DEBUGGING ONLY
  * Print the token according to its type in a human-readable format. :)
@@ -187,7 +182,6 @@ StatExprToken::print(const char *prefix)
   }
 }
 
-
 /**
  * StatExprToken::precedence()
  * ---------------------------
@@ -214,7 +208,6 @@ StatExprToken::precedence()
   }
 }
 
-
 /**
  * StatExprToken::statVarSet()
  * ---------------------------
@@ -280,7 +273,6 @@ StatExprToken::statVarSet(RecDataT type, RecData value)
   return varSetData(m_token_type, m_token_name, converted_value);
 }
 
-
 /***********************************************************************
                                                  StatExprList
  **********************************************************************/
@@ -293,7 +285,6 @@ StatExprList::StatExprList() : m_size(0)
 {
 }
 
-
 /**
  * StatExprList::clean()
  * ---------------------
@@ -310,7 +301,6 @@ StatExprList::clean()
   ink_assert(m_size == 0);
 }
 
-
 void
 StatExprList::enqueue(StatExprToken *entry)
 {
@@ -319,7 +309,6 @@ StatExprList::enqueue(StatExprToken *entry)
   m_size += 1;
 }
 
-
 void
 StatExprList::push(StatExprToken *entry)
 {
@@ -328,7 +317,6 @@ StatExprList::push(StatExprToken *entry)
   m_size += 1;
 }
 
-
 StatExprToken *
 StatExprList::dequeue()
 {
@@ -339,7 +327,6 @@ StatExprList::dequeue()
   return (StatExprToken *)m_tokenList.dequeue();
 }
 
-
 StatExprToken *
 StatExprList::pop()
 {
@@ -350,7 +337,6 @@ StatExprList::pop()
   return m_tokenList.pop();
 }
 
-
 StatExprToken *
 StatExprList::top()
 {
@@ -360,7 +346,6 @@ StatExprList::top()
   return m_tokenList.head;
 }
 
-
 StatExprToken *
 StatExprList::first()
 {
@@ -370,7 +355,6 @@ StatExprList::first()
   return m_tokenList.head;
 }
 
-
 StatExprToken *
 StatExprList::next(StatExprToken *current)
 {
@@ -380,7 +364,6 @@ StatExprList::next(StatExprToken *current)
   return (current->link).next;
 }
 
-
 /**
  * StatExprList::print()
  * ---------------------
@@ -394,7 +377,6 @@ StatExprList::print(const char *prefix)
   }
 }
 
-
 /**
  * StatExprToken::count()
  * ----------------------
@@ -406,12 +388,10 @@ StatExprList::count()
   return m_size;
 }
 
-
 /***********************************************************************
                                                      StatObject
  **********************************************************************/
 
-
 /**
  * StatObject::StatObject()
  * ------------------------
@@ -424,7 +404,6 @@ StatObject::StatObject()
 {
 }
 
-
 StatObject::StatObject(unsigned identifier)
   : m_id(identifier), m_debug(false), m_expr_string(NULL), m_node_dest(NULL), m_cluster_dest(NULL), m_expression(NULL),
     m_postfix(NULL), m_last_update(-1), m_current_time(-1), m_update_interval(-1), m_stats_max(FLT_MAX), m_stats_min(FLT_MIN),
@@ -432,7 +411,6 @@ StatObject::StatObject(unsigned identifier)
 {
 }
 
-
 /**
  * StatObject::clean()
  * -------------------
@@ -446,7 +424,6 @@ StatObject::clean()
   delete m_postfix;
 }
 
-
 /**
  * StatObject::assignDst()
  * -----------------------
@@ -489,7 +466,6 @@ StatObject::assignDst(const char *str, bool m_node_var, bool m_sum_var)
   }
 }
 
-
 /**
  * StatObject::assignExpr()
  * ------------------------
@@ -553,7 +529,6 @@ StatObject::assignExpr(char *str)
   infix2postfix();
 }
 
-
 /**
  * StatObject::infix2postfix()
  * ---------------------------
@@ -628,7 +603,6 @@ StatObject::infix2postfix()
   m_expression = NULL;
 }
 
-
 /**
  * StatObject::NodeStatEval()
  * --------------------------
@@ -715,7 +689,6 @@ StatObject::NodeStatEval(RecDataT *result_type, bool cluster)
   return tempValue;
 }
 
-
 /**
  * StatObject::ClusterStatEval()
  * -----------------------------
@@ -743,7 +716,6 @@ StatObject::ClusterStatEval(RecDataT *result_type)
   }
 }
 
-
 /**
  * StatObject::setTokenValue()
  * ---------------------------
@@ -804,7 +776,6 @@ StatObject::setTokenValue(StatExprToken *token, bool cluster)
   }   // m_token_name?
 }
 
-
 /**
  * StatObject::StatBinaryEval()
  * ------------------------
@@ -934,7 +905,6 @@ StatObject::StatBinaryEval(StatExprToken *left, char op, StatExprToken *right, b
   return (result);
 }
 
-
 /***********************************************************************
                                                    StatObjectList
  **********************************************************************/
@@ -943,7 +913,6 @@ StatObjectList::StatObjectList() : m_size(0)
 {
 }
 
-
 void
 StatObjectList::clean()
 {
@@ -957,7 +926,6 @@ StatObjectList::clean()
   ink_assert(m_size == 0);
 }
 
-
 void
 StatObjectList::enqueue(StatObject *object)
 {
@@ -972,21 +940,18 @@ StatObjectList::enqueue(StatObject *object)
   m_size += 1;
 }
 
-
 StatObject *
 StatObjectList::first()
 {
   return m_statList.head;
 }
 
-
 StatObject *
 StatObjectList::next(StatObject *current)
 {
   return (current->link).next;
 }
 
-
 /**
  * StatObjectList::Eval()
  * ----------------------
@@ -1128,7 +1093,6 @@ StatObjectList::Eval()
   return count;
 } /* Eval() */
 
-
 /**
  * StatObjectList::print()
  * --------------------------

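Editor's note: StatObject::assignExpr() tokenizes the configured expression and then calls infix2postfix(), with StatExprToken::precedence() driving the conversion. Below is a generic, self-contained shunting-yard sketch of the same idea; it is the textbook form over single characters, not the StatExprToken/StatExprList implementation above.

// Sketch: infix-to-postfix conversion with an operator stack.
#include <cctype>
#include <stack>
#include <string>

static int
precedence(char op)
{
  switch (op) {
  case '+':
  case '-':
    return 1;
  case '*':
  case '/':
    return 2;
  default:
    return 0;
  }
}

std::string
infix_to_postfix(const std::string &expr)
{
  std::string out;
  std::stack<char> ops;

  for (char c : expr) {
    if (isspace(static_cast<unsigned char>(c))) {
      continue;
    } else if (isalnum(static_cast<unsigned char>(c))) {
      out += c; // operands go straight to the output
      out += ' ';
    } else if (c == '(') {
      ops.push(c);
    } else if (c == ')') {
      while (!ops.empty() && ops.top() != '(') {
        out += ops.top();
        out += ' ';
        ops.pop();
      }
      if (!ops.empty())
        ops.pop(); // drop the matching '('
    } else if (precedence(c) > 0) {
      // pop operators of equal or higher precedence before pushing this one
      while (!ops.empty() && precedence(ops.top()) >= precedence(c)) {
        out += ops.top();
        out += ' ';
        ops.pop();
      }
      ops.push(c);
    }
  }
  while (!ops.empty()) {
    out += ops.top();
    out += ' ';
    ops.pop();
  }
  return out;
}

For example, infix_to_postfix("(a+b)*c") yields "a b + c *", which a simple stack evaluator can then process left to right.
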
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatType.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatType.h b/cmd/traffic_manager/StatType.h
index 3ab0367..5f87134 100644
--- a/cmd/traffic_manager/StatType.h
+++ b/cmd/traffic_manager/StatType.h
@@ -112,7 +112,6 @@ public:
   bool statVarSet(RecDataT, RecData);
 };
 
-
 /**
  * StatExprList
  *   simply a list of StatExprToken.
@@ -181,7 +180,6 @@ private:
   void infix2postfix();
 };
 
-
 /**
  * StatObjectList
  *    simply a list of StatObject.

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatXML.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatXML.cc b/cmd/traffic_manager/StatXML.cc
index 1d6a7ba..57474e2 100644
--- a/cmd/traffic_manager/StatXML.cc
+++ b/cmd/traffic_manager/StatXML.cc
@@ -56,7 +56,6 @@ XML_extractContent(const char *name, char *content, size_t result_len)
   return (strlen(content));
 }
 
-
 //
 // Returns true  if 'c'is an operator (in our definition),
 //         false otherwise

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/StatXML.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/StatXML.h b/cmd/traffic_manager/StatXML.h
index 249be2f..f7428ca 100644
--- a/cmd/traffic_manager/StatXML.h
+++ b/cmd/traffic_manager/StatXML.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef _STATXML_H_
 #define _STATXML_H_
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_manager/WebOverview.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_manager/WebOverview.cc b/cmd/traffic_manager/WebOverview.cc
index 252820d..6ce5a24 100644
--- a/cmd/traffic_manager/WebOverview.cc
+++ b/cmd/traffic_manager/WebOverview.cc
@@ -76,7 +76,6 @@ overviewRecord::overviewRecord(unsigned long inet_addr, bool local, ClusterPeerI
     RecGetRecordOrderAndId(node_rec_data.recs[0].name, &node_rec_first_ix, NULL);
   }
 
-
   // Query for the name of the node.  If it is not there, some
   //   their cluster ip address
   name_l = this->readString("proxy.node.hostname_FQ", &name_found);
@@ -278,7 +277,7 @@ overviewPage::overviewPage() : sortRecords(10, false)
   nodeRecords = ink_hash_table_create(InkHashTableKeyType_Word);
   numHosts = 0;
   ourAddr = 0; // We will update this when we add the record for
-  //  this machine
+               //  this machine
 }
 
 overviewPage::~overviewPage()
@@ -331,7 +330,6 @@ overviewPage::checkForUpdates()
   ink_mutex_release(&accessLock);
 }
 
-
 // overrviewPage::sortHosts()
 //
 // resorts sortRecords, but always leaves the local node
@@ -428,7 +426,8 @@ overviewPage::findNodeByName(const char *nodeName)
   }
 }
 
-// MgmtString overviewPage::readString(const char* nodeName, char* *name, bool *found = NULL)
+// MgmtString overviewPage::readString(const char* nodeName, char* *name, bool
+// *found = NULL)
 //
 //   Looks up a node record for a specific by nodeName
 //    CALLEE deallocates the string with free()
@@ -457,7 +456,8 @@ overviewPage::readString(const char *nodeName, const char *name, bool *found)
   return r;
 }
 
-// MgmtInt overviewPage::readInteger(const char* nodeName, char* *name, bool *found = NULL)
+// MgmtInt overviewPage::readInteger(const char* nodeName, char* *name, bool
+// *found = NULL)
 //
 //   Looks up a node record for a specific by nodeName
 //
@@ -485,7 +485,8 @@ overviewPage::readInteger(const char *nodeName, const char *name, bool *found)
   return r;
 }
 
-// MgmtFloat overviewPage::readFloat(const char* nodeName, char* *name, bool *found = NULL)
+// MgmtFloat overviewPage::readFloat(const char* nodeName, char* *name, bool
+// *found = NULL)
 //
 //   Looks up a node record for a specific by nodeName
 //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_top/stats.h
----------------------------------------------------------------------
diff --git a/cmd/traffic_top/stats.h b/cmd/traffic_top/stats.h
index 78d7f89..2b5c312 100644
--- a/cmd/traffic_top/stats.h
+++ b/cmd/traffic_top/stats.h
@@ -55,7 +55,6 @@ const char seperator[] = "\": \"";
 const char end[] = "\",\n";
 };
 
-
 //----------------------------------------------------------------------------
 class Stats
 {
@@ -116,7 +115,6 @@ public:
     lookup_table.insert(make_pair("server_req_conn", LookupItem("Req/Conn", "server_req", "server_conn", 3)));
     lookup_table.insert(make_pair("server_curr_conn", LookupItem("Curr Conn", "proxy.process.http.current_server_connections", 1)));
 
-
     lookup_table.insert(
       make_pair("client_head", LookupItem("Head Bytes", "proxy.process.http.user_agent_response_header_total_size", 2)));
     lookup_table.insert(
@@ -132,7 +130,6 @@ public:
     lookup_table.insert(make_pair("ka_total", LookupItem("KA Total", "proxy.process.net.dynamic_keep_alive_timeout_in_total", 2)));
     lookup_table.insert(make_pair("ka_count", LookupItem("KA Count", "proxy.process.net.dynamic_keep_alive_timeout_in_count", 2)));
 
-
     lookup_table.insert(make_pair("client_abort", LookupItem("Clnt Abort", "proxy.process.http.err_client_abort_count_stat", 2)));
     lookup_table.insert(make_pair("conn_fail", LookupItem("Conn Fail", "proxy.process.http.err_connect_fail_count_stat", 2)));
     lookup_table.insert(make_pair("abort", LookupItem("Abort", "proxy.process.http.transaction_counts.errors.aborts", 2)));
@@ -222,7 +219,6 @@ public:
     lookup_table.insert(make_pair("s_1m", LookupItem("1 MB", "proxy.process.http.response_document_size_1M", 5)));
     lookup_table.insert(make_pair("s_>1m", LookupItem("> 1 MB", "proxy.process.http.response_document_size_inf", 5)));
 
-
     // sum together
     lookup_table.insert(make_pair("ram_hit_miss", LookupItem("Ram Hit+Miss", "ram_hit", "ram_miss", 6)));
     lookup_table.insert(make_pair("client_net", LookupItem("Net (bits)", "client_head", "client_body", 7)));
@@ -233,7 +229,6 @@ public:
     lookup_table.insert(make_pair("server_size", LookupItem("Total Size", "server_head", "server_body", 6)));
     lookup_table.insert(make_pair("server_avg_size", LookupItem("Avg Size", "server_size", "server_req", 3)));
 
-
     lookup_table.insert(make_pair("total_time", LookupItem("Total Time", "proxy.process.http.total_transactions_time", 2)));
 
     // ratio
@@ -268,8 +263,10 @@ public:
             (*_stats)[key] = strValue;
           } else {
             if (TSRecordGetInt(item.name, &value) != TS_ERR_OKAY) {
-              fprintf(stderr, "Error getting stat: %s when calling TSRecordGetInt() failed: file \"%s\", line %d\n\n", item.name,
-                      __FILE__, __LINE__);
+              fprintf(stderr, "Error getting stat: %s when calling "
+                              "TSRecordGetInt() failed: file \"%s\", line "
+                              "%d\n\n",
+                      item.name, __FILE__, __LINE__);
               abort();
             }
             string key = item.name;
@@ -344,7 +341,6 @@ public:
     getStat(key, value, strtmp, typetmp, overrideType);
   }
 
-
   void
   getStat(const string &key, string &value)
   {

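Editor's note: the stats.h hunk above only reformats the error message printed when TSRecordGetInt() fails. For context, here is a minimal sketch of fetching one metric through the management API; the helper name is made up, and error handling is trimmed.

// Sketch: read a single integer metric over the management API.
#include "mgmtapi.h"
#include <cstdio>
#include <inttypes.h>

int
print_metric(const char *name)
{
  // Connect to the local manager first, as traffic_top does.
  if (TSInit(NULL, static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS)) != TS_ERR_OKAY) {
    fprintf(stderr, "could not connect to the local manager\n");
    return -1;
  }

  TSRecordInt value = 0;
  if (TSRecordGetInt(name, &value) != TS_ERR_OKAY) {
    fprintf(stderr, "error getting stat %s\n", name);
    return -1;
  }
  printf("%s = %" PRId64 "\n", name, (int64_t)value);
  return 0;
}
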
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_top/traffic_top.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_top/traffic_top.cc b/cmd/traffic_top/traffic_top.cc
index df447c2..85bbd3b 100644
--- a/cmd/traffic_top/traffic_top.cc
+++ b/cmd/traffic_top/traffic_top.cc
@@ -154,7 +154,8 @@ response_code_page(Stats &stats)
 {
   attron(COLOR_PAIR(colorPair::border));
   attron(A_BOLD);
-  mvprintw(0, 0, "                              RESPONSE CODES                                   ");
+  mvprintw(0, 0, "                              RESPONSE CODES                 "
+                 "                  ");
   attroff(COLOR_PAIR(colorPair::border));
   attroff(A_BOLD);
 
@@ -229,22 +230,24 @@ help(const string &host, const string &version)
     attron(A_BOLD);
     mvprintw(0, 0, "Overview:");
     attroff(A_BOLD);
-    mvprintw(
-      1, 0,
-      "traffic_top is a top like program for Apache Traffic Server (ATS). "
-      "There is a lot of statistical information gathered by ATS. "
-      "This program tries to show some of the more important stats and gives a good overview of what the proxy server is doing. "
-      "Hopefully this can be used as a tool for diagnosing the proxy server if there are problems.");
+    mvprintw(1, 0, "traffic_top is a top like program for Apache Traffic Server (ATS). "
+                   "There is a lot of statistical information gathered by ATS. "
+                   "This program tries to show some of the more important stats and gives "
+                   "a good overview of what the proxy server is doing. "
+                   "Hopefully this can be used as a tool for diagnosing the proxy server "
+                   "if there are problems.");
 
     attron(A_BOLD);
     mvprintw(7, 0, "Definitions:");
     attroff(A_BOLD);
     mvprintw(8, 0, "Fresh      => Requests that were servered by fresh entries in cache");
-    mvprintw(9, 0, "Revalidate => Requests that contacted the origin to verify if still valid");
+    mvprintw(9, 0, "Revalidate => Requests that contacted the origin to verify "
+                   "if still valid");
     mvprintw(10, 0, "Cold       => Requests that were not in cache at all");
     mvprintw(11, 0, "Changed    => Requests that required entries in cache to be updated");
     mvprintw(12, 0, "Changed    => Requests that can't be cached for some reason");
-    mvprintw(12, 0, "No Cache   => Requests that the client sent Cache-Control: no-cache header");
+    mvprintw(12, 0, "No Cache   => Requests that the client sent "
+                    "Cache-Control: no-cache header");
 
     attron(COLOR_PAIR(colorPair::border));
     attron(A_BOLD);
@@ -400,7 +403,8 @@ main(int argc, char **argv)
   string url = "";
   if (optind >= argc) {
     if (TS_ERR_OKAY != TSInit(NULL, static_cast<TSInitOptionT>(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS))) {
-      fprintf(stderr, "Error: missing URL on command line or error connecting to the local manager\n");
+      fprintf(stderr, "Error: missing URL on command line or error connecting "
+                      "to the local manager\n");
       usage();
     }
   } else {
@@ -424,7 +428,6 @@ main(int argc, char **argv)
   init_pair(colorPair::border, COLOR_WHITE, COLOR_BLUE);
   //  mvchgat(0, 0, -1, A_BLINK, 1, NULL);
 
-
   enum Page {
     MAIN_PAGE,
     RESPONSE_PAGE,
@@ -447,7 +450,6 @@ main(int argc, char **argv)
     attroff(COLOR_PAIR(colorPair::border));
     attroff(A_BOLD);
 
-
     if (page == MAIN_PAGE) {
       main_stats_page(stats);
     } else if (page == RESPONSE_PAGE) {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/cmd/traffic_via/traffic_via.cc
----------------------------------------------------------------------
diff --git a/cmd/traffic_via/traffic_via.cc b/cmd/traffic_via/traffic_via.cc
index 7426d1b..b377d5c 100644
--- a/cmd/traffic_via/traffic_via.cc
+++ b/cmd/traffic_via/traffic_via.cc
@@ -84,12 +84,16 @@ detailViaLookup(char flag)
     // Cache Lookup Result
     viaTable->next = new VIA("Cache Lookup Result");
     viaTable->next->viaData[(unsigned char)'C'] = "cache hit but config forces revalidate";
-    viaTable->next->viaData[(unsigned char)'I'] = "conditional miss (client sent conditional, fresh in cache, returned 412)";
+    viaTable->next->viaData[(unsigned char)'I'] = "conditional miss (client "
+                                                  "sent conditional, fresh in "
+                                                  "cache, returned 412)";
     viaTable->next->viaData[(unsigned char)' '] = "cache miss or no cache lookup";
     viaTable->next->viaData[(unsigned char)'U'] = "cache hit, but client forces revalidate (e.g. Pragma: no-cache)";
     viaTable->next->viaData[(unsigned char)'D'] = "cache hit, but method forces revalidated (e.g. ftp, not anonymous)";
     viaTable->next->viaData[(unsigned char)'M'] = "cache miss (url not in cache)";
-    viaTable->next->viaData[(unsigned char)'N'] = "conditional hit (client sent conditional, doc fresh in cache, returned 304)";
+    viaTable->next->viaData[(unsigned char)'N'] = "conditional hit (client "
+                                                  "sent conditional, doc fresh "
+                                                  "in cache, returned 304)";
     viaTable->next->viaData[(unsigned char)'H'] = "cache hit";
     viaTable->next->viaData[(unsigned char)'S'] = "cache hit, but expired";
     viaTable->next->viaData[(unsigned char)'K'] = "cookie miss";
@@ -254,8 +258,11 @@ decodeViaHeader(const char *str)
   // Invalid header size, come out.
   printf("\nInvalid VIA header. VIA header length should be 6 or 24 characters\n");
   printf("Valid via header format is "
-         "[u<client-stuff>c<cache-lookup-stuff>s<server-stuff>f<cache-fill-stuff>p<proxy-stuff>]e<error-codes>:t<tunneling-info>c<"
-         "cache type><cache-lookup-result>i<icp-conn-info>p<parent-proxy-conn-info>s<server-conn-info>]");
+         "[u<client-stuff>c<cache-lookup-stuff>s<server-stuff>f<cache-fill-"
+         "stuff>p<proxy-stuff>]e<error-codes>:t<tunneling-info>c<"
+         "cache "
+         "type><cache-lookup-result>i<icp-conn-info>p<parent-proxy-conn-info>s<"
+         "server-conn-info>]");
   return TS_ERR_FAIL;
 }
 
@@ -270,8 +277,8 @@ filterViaHeader()
   int errOffset;
   int pcreExecCode;
   int i;
-  const char *viaPattern =
-    "\\[([ucsfpe]+[^\\]]+)\\]"; // Regex to match via header with in [] which can start with character class ucsfpe
+  const char *viaPattern = "\\[([ucsfpe]+[^\\]]+)\\]"; // Regex to match via header with in [] which
+                                                       // can start with character class ucsfpe
   char *viaHeaderString;
   char viaHeader[1024];
 
@@ -301,7 +308,9 @@ filterViaHeader()
     // Match successful, but too many substrings
     if (pcreExecCode == 0) {
       pcreExecCode = SUBSTRING_VECTOR_COUNT / 3;
-      printf("Too many substrings were found. %d substrings couldn't fit into subStringVector\n", pcreExecCode - 1);
+      printf("Too many substrings were found. %d substrings couldn't fit into "
+             "subStringVector\n",
+             pcreExecCode - 1);
     }
 
     // Loop based on number of matches found

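Editor's note: filterViaHeader() above compiles the pattern "\\[([ucsfpe]+[^\\]]+)\\]" and pulls the bracketed via string out with pcre_exec(). A standalone sketch of the same extraction follows; it is not the traffic_via code, and the ovector size of 30 is an arbitrary choice for the example.

// Sketch: extract the via flag string from a header line with PCRE.
#include <pcre.h>
#include <cstdio>
#include <cstring>

int
extract_via(const char *line)
{
  const char *pattern = "\\[([ucsfpe]+[^\\]]+)\\]";
  const char *err;
  int erroffset;
  int ovector[30];

  pcre *re = pcre_compile(pattern, 0, &err, &erroffset, NULL);
  if (re == NULL) {
    fprintf(stderr, "pcre_compile failed at offset %d: %s\n", erroffset, err);
    return -1;
  }

  int rc = pcre_exec(re, NULL, line, (int)strlen(line), 0, 0, ovector, 30);
  if (rc >= 2) {
    // Capture group 1 (ovector[2]..ovector[3]) is the via string inside the brackets.
    printf("via flags: %.*s\n", ovector[3] - ovector[2], line + ovector[2]);
  }
  pcre_free(re);
  return rc;
}
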
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/doc/arch/cache/cache-data-structures.en.rst
----------------------------------------------------------------------
diff --git a/doc/arch/cache/cache-data-structures.en.rst b/doc/arch/cache/cache-data-structures.en.rst
index 1158051..b172999 100644
--- a/doc/arch/cache/cache-data-structures.en.rst
+++ b/doc/arch/cache/cache-data-structures.en.rst
@@ -24,7 +24,8 @@ Cache Data Structures
 
 .. cpp:class:: OpenDir
 
-   An open directory entry. It contains all the information of a
+   This represents an open directory entry. An entry is open when there is an active write on the object. Read operations do not of themselves require an `OpenDir` but if there is already one for the object it will be used by the read operation to coordinate with the write operations.
+
    :cpp:class:`Dir` plus additional information from the first :cpp:class:`Doc`.
 
 .. cpp:class:: CacheVC

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/Cache.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/Cache.cc b/iocore/cache/Cache.cc
index 330aedd..cb7468d 100644
--- a/iocore/cache/Cache.cc
+++ b/iocore/cache/Cache.cc
@@ -312,7 +312,7 @@ CacheVC::CacheVC() : alternate_index(CACHE_ALT_INDEX_DEFAULT)
 }
 
 #ifdef HTTP_CACHE
-HTTPInfo::FragOffset *
+HTTPInfo::FragmentDescriptorTable *
 CacheVC::get_frag_table()
 {
   ink_assert(alternate.valid());
@@ -482,10 +482,29 @@ CacheVC::set_http_info(CacheHTTPInfo *ainfo)
   } else
     f.allow_empty_doc = 0;
   alternate.copy_shallow(ainfo);
+  // This is not a good place to do this but I can't figure out a better one. We must do it
+  // no earlier than this, because there's no actual alternate to store the value in before this
+  // and I don't know of any later point that's guaranteed to be called before this is needed.
+  alternate.m_alt->m_fixed_fragment_size = cache_config_target_fragment_size - sizeofDoc;
   ainfo->clear();
 }
 #endif
 
+int64_t
+CacheVC::set_inbound_range(int64_t min, int64_t max)
+{
+  resp_range.clear();
+  resp_range.getRangeSpec().add(min, max);
+  return 1 + (max - min);
+}
+
+void
+CacheVC::set_full_content_length(int64_t cl)
+{
+  alternate.object_size_set(cl);
+  resp_range.apply(cl);
+}
+
 bool
 CacheVC::set_pin_in_cache(time_t time_pin)
 {
@@ -501,6 +520,25 @@ CacheVC::set_pin_in_cache(time_t time_pin)
   return true;
 }
 
+void
+CacheVC::set_content_range(HTTPRangeSpec const &r)
+{
+  resp_range.getRangeSpec() = r;
+  resp_range.start();
+}
+
+bool
+CacheVC::get_uncached(HTTPRangeSpec const &req, HTTPRangeSpec &result, int64_t initial)
+{
+  HTTPRangeSpec::Range r =
+    od ? write_vector->get_uncached_hull(earliest_key, req, initial) : alternate.get_uncached_hull(req, initial);
+  if (r.isValid()) {
+    result.add(r);
+    return true;
+  }
+  return false;
+}
+
 bool
 CacheVC::set_disk_io_priority(int priority)
 {
@@ -559,6 +597,7 @@ Vol::close_read(CacheVC *cont)
   EThread *t = cont->mutex->thread_holding;
   ink_assert(t == this_ethread());
   ink_assert(t == mutex->thread_holding);
+  open_dir.close_entry(cont);
   if (dir_is_empty(&cont->earliest_dir))
     return 1;
   int i = dir_evac_bucket(&cont->earliest_dir);
@@ -1112,6 +1151,12 @@ CacheProcessor::open_read(Continuation *cont, const CacheKey *key, bool cluster_
 }
 
 inkcoreapi Action *
+CacheProcessor::open_read(Continuation *cont, CacheVConnection* writer, HTTPHdr* client_request_hdr)
+{
+  return caches[CACHE_FRAG_TYPE_HTTP]->open_read(cont, writer, client_request_hdr);
+}
+
+inkcoreapi Action *
 CacheProcessor::open_write(Continuation *cont, CacheKey *key, bool cluster_cache_local ATS_UNUSED, CacheFragType frag_type,
                            int expected_size ATS_UNUSED, int options, time_t pin_in_cache, char *hostname, int host_len)
 {
@@ -2164,6 +2209,19 @@ CacheVC::is_pread_capable()
   return !f.read_from_writer_called;
 }
 
+#if 0
+void
+CacheVC::get_missing_ranges(HTTPRangeSpec& missing)
+{
+  missing.reset();
+  if (0 == alternate.);
+  // For now we'll just compute the convex hull of the missing data.
+  for ( RangeBox::const_iterator spot = req.begin(), limit = req.end() ; spot != limit ; ++spot ) {
+    
+  }
+}
+#endif
+
 #define STORE_COLLISION 1
 
 #ifdef HTTP_CACHE
@@ -2189,7 +2247,7 @@ unmarshal_helper(Doc *doc, Ptr<IOBufferData> &buf, int &okay)
     @internal I looked at doing this in place (rather than a copy & modify) but
     - The in place logic would be even worse than this mess
     - It wouldn't save you that much, since you end up doing inserts early in the buffer.
-      Without extreme care in the logic it could end up doing more copying thatn
+      Without extreme care in the logic it could end up doing more copying than
       the simpler copy & modify.
 
     @internal This logic presumes the existence of some slack at the end of the buffer, which
@@ -2208,6 +2266,7 @@ upgrade_doc_version(Ptr<IOBufferData> &buf)
     if (0 == doc->hlen) {
       Debug("cache_bc", "Doc %p without header, no upgrade needed.", doc);
     } else if (CACHE_FRAG_TYPE_HTTP_V23 == doc->doc_type) {
+      typedef cache_bc::HTTPCacheFragmentTable::FragOffset FragOffset;
       cache_bc::HTTPCacheAlt_v21 *alt = reinterpret_cast<cache_bc::HTTPCacheAlt_v21 *>(doc->hdr());
       if (alt && alt->is_unmarshalled_format()) {
         Ptr<IOBufferData> d_buf(ioDataAllocator.alloc());
@@ -2215,9 +2274,8 @@ upgrade_doc_version(Ptr<IOBufferData> &buf)
         char *src;
         char *dst;
         char *hdr_limit = doc->data();
-        HTTPInfo::FragOffset *frags =
-          reinterpret_cast<HTTPInfo::FragOffset *>(static_cast<char *>(buf->data()) + cache_bc::sizeofDoc_v23);
-        int frag_count = doc->_flen / sizeof(HTTPInfo::FragOffset);
+        FragOffset *frags = reinterpret_cast<FragOffset *>(static_cast<char *>(buf->data()) + cache_bc::sizeofDoc_v23);
+        int frag_count = doc->_flen / sizeof(FragOffset);
         size_t n = 0;
         size_t content_size = doc->data_len();
 
@@ -2450,6 +2508,11 @@ CacheVC::handleRead(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
   io.action = this;
   io.thread = mutex->thread_holding->tt == DEDICATED ? AIO_CALLBACK_THREAD_ANY : mutex->thread_holding;
   SET_HANDLER(&CacheVC::handleReadDone);
+  {
+    char xt[33];
+    Debug("amc", "cache read : key = %s %" PRId64 " bytes at stripe offset =% " PRId64, key.toHexStr(xt), io.aiocb.aio_nbytes,
+          io.aiocb.aio_offset);
+  }
   ink_assert(ink_aio_read(&io) >= 0);
   CACHE_DEBUG_INCREMENT_DYN_STAT(cache_pread_count_stat);
   return EVENT_CONT;
@@ -2514,7 +2577,7 @@ CacheVC::removeEvent(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
       goto Lfree;
     }
     if (!f.remove_aborted_writers) {
-      if (vol->open_write(this, true, 1)) {
+      if (vol->open_write(this)) {
         // writer  exists
         ink_release_assert(od = vol->open_read(&key));
         od->dont_update_directory = 1;
@@ -3256,7 +3319,6 @@ CacheProcessor::open_read(Continuation *cont, const HttpCacheKey *key, bool clus
   return caches[type]->open_read(cont, &key->hash, request, params, type, key->hostname, key->hostlen);
 }
 
-
 //----------------------------------------------------------------------------
 Action *
 CacheProcessor::open_write(Continuation *cont, int expected_size, const HttpCacheKey *key, bool cluster_cache_local,

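Editor's note: the new CacheVC methods above (set_inbound_range(), set_full_content_length(), set_content_range(), get_uncached()) expose range state for partial object caching; get_uncached() reports the convex hull of the requested bytes that are not yet in cache, delegating to get_uncached_hull() on either the write vector or the alternate. Below is a small self-contained illustration of that hull computation, using hypothetical stand-in types rather than HTTPRangeSpec.

// Sketch: given a requested byte range and the ranges already cached, return
// one interval covering everything still missing (the "uncached hull").
#include <algorithm>
#include <cstdint>
#include <vector>

struct ByteRange {
  int64_t lo = -1;
  int64_t hi = -1;
  bool valid() const { return lo >= 0 && lo <= hi; }
};

ByteRange
uncached_hull(ByteRange req, const std::vector<ByteRange> &cached)
{
  ByteRange hull;         // invalid means the request is fully cached
  int64_t pos = req.lo;   // next byte we still need

  std::vector<ByteRange> sorted(cached);
  std::sort(sorted.begin(), sorted.end(), [](const ByteRange &a, const ByteRange &b) { return a.lo < b.lo; });

  for (const ByteRange &c : sorted) {
    if (c.hi < pos || c.lo > req.hi)
      continue; // no overlap with what we still need
    if (c.lo > pos) {
      // bytes [pos, c.lo - 1] are uncached
      if (!hull.valid())
        hull.lo = pos;
      hull.hi = c.lo - 1;
    }
    pos = std::max(pos, c.hi + 1);
  }
  if (pos <= req.hi) {
    // trailing uncached bytes [pos, req.hi]
    if (!hull.valid())
      hull.lo = pos;
    hull.hi = req.hi;
  }
  return hull;
}

For a request for bytes 0-99 with only 0-49 cached, this returns [50, 99]; if the cached spans are 10-19 and 40-49, the hull widens to cover every gap and comes back as [0, 99].
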
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheDir.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheDir.cc b/iocore/cache/CacheDir.cc
index 000bc46..6fe0ddb 100644
--- a/iocore/cache/CacheDir.cc
+++ b/iocore/cache/CacheDir.cc
@@ -63,52 +63,44 @@ OpenDir::OpenDir()
   SET_HANDLER(&OpenDir::signal_readers);
 }
 
-/*
-   If allow_if_writers is false, open_write fails if there are other writers.
-   max_writers sets the maximum number of concurrent writers that are
-   allowed. Only The first writer can set the max_writers. It is ignored
-   for later writers.
-   Returns 1 on success and 0 on failure.
-   */
-int
-OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
+OpenDirEntry *
+OpenDir::open_entry(Vol *vol, CryptoHash const &key, bool force_p)
 {
-  ink_assert(cont->vol->mutex->thread_holding == this_ethread());
-  unsigned int h = cont->first_key.slice32(0);
+  ink_assert(vol->mutex->thread_holding == this_ethread());
+  unsigned int h = key.slice32(0);
   int b = h % OPEN_DIR_BUCKETS;
   for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next) {
-    if (!(d->writers.head->first_key == cont->first_key))
+    if (!(d->first_key == key))
       continue;
-    if (allow_if_writers && d->num_writers < d->max_writers) {
-      d->writers.push(cont);
-      d->num_writers++;
-      cont->od = d;
-      cont->write_vector = &d->vector;
-      return 1;
-    }
-    return 0;
+    ++(d->num_active);
+    //    cont->od = d;
+    //    cont->write_vector = &d->vector;
+    return d;
   }
-  OpenDirEntry *od = THREAD_ALLOC(openDirEntryAllocator, cont->mutex->thread_holding);
-  od->readers.head = NULL;
-  od->writers.push(cont);
-  od->num_writers = 1;
-  od->max_writers = max_writers;
+
+  if (!force_p)
+    return NULL;
+
+  OpenDirEntry *od = THREAD_ALLOC(openDirEntryAllocator, vol->mutex->thread_holding);
+  od->mutex = new_ProxyMutex();
+  od->first_key = key;
+  od->num_active = 1;
   od->vector.data.data = &od->vector.data.fast_data[0];
   od->dont_update_directory = 0;
   od->move_resident_alt = 0;
   od->reading_vec = 0;
   od->writing_vec = 0;
   dir_clear(&od->first_dir);
-  cont->od = od;
-  cont->write_vector = &od->vector;
+  //  cont->od = od;
+  //  cont->write_vector = &od->vector;
   bucket[b].push(od);
-  return 1;
+  return od;
 }
 
 int
 OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
 {
-  Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
+  CacheVCQ newly_delayed_readers;
   EThread *t = mutex->thread_holding;
   CacheVC *c = NULL;
   while ((c = delayed_readers.dequeue())) {
@@ -130,32 +122,29 @@ OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
   return 0;
 }
 
-int
-OpenDir::close_write(CacheVC *cont)
+void
+OpenDir::close_entry(CacheVC *vc)
 {
-  ink_assert(cont->vol->mutex->thread_holding == this_ethread());
-  cont->od->writers.remove(cont);
-  cont->od->num_writers--;
-  if (!cont->od->writers.head) {
-    unsigned int h = cont->first_key.slice32(0);
+  ink_assert(vc->vol->mutex->thread_holding == this_ethread());
+  if (vc->od && --(vc->od->num_active) < 1) {
+    unsigned int h = vc->od->first_key.slice32(0);
     int b = h % OPEN_DIR_BUCKETS;
-    bucket[b].remove(cont->od);
-    delayed_readers.append(cont->od->readers);
-    signal_readers(0, 0);
-    cont->od->vector.clear();
-    THREAD_FREE(cont->od, openDirEntryAllocator, cont->mutex->thread_holding);
+    bucket[b].remove(vc->od);
+    vc->od->vector.clear();
+    vc->od->mutex = 0;
+    THREAD_FREE(vc->od, openDirEntryAllocator, vc->vol->mutex->thread_holding);
   }
-  cont->od = NULL;
-  return 0;
+  vc->od = NULL;
 }
 
+#if 0
 OpenDirEntry *
 OpenDir::open_read(const CryptoHash *key)
 {
   unsigned int h = key->slice32(0);
   int b = h % OPEN_DIR_BUCKETS;
   for (OpenDirEntry *d = bucket[b].head; d; d = d->link.next)
-    if (d->writers.head->first_key == *key)
+    if (d->first_key == *key)
       return d;
   return NULL;
 }
@@ -170,6 +159,61 @@ OpenDirEntry::wait(CacheVC *cont, int msec)
   readers.push(cont);
   return EVENT_CONT;
 }
+#endif
+
+int
+OpenDirEntry::index_of(CacheKey const &alt_key)
+{
+  return vector.index_of(alt_key);
+}
+
+bool
+OpenDirEntry::has_writer(CacheKey const &alt_key)
+{
+  return vector.has_writer(alt_key);
+}
+
+OpenDirEntry &
+OpenDirEntry::write_active(CacheKey const &alt_key, CacheVC *vc, int64_t offset)
+{
+  Debug("amc", "VC %p write active @ %" PRId64, vc, offset);
+  vector.write_active(alt_key, vc, offset);
+  return *this;
+}
+
+OpenDirEntry &
+OpenDirEntry::write_complete(CacheKey const &alt_key, CacheVC *vc, bool success)
+{
+  Debug("amc", "[OpenDir::write_complete] VC %p write %s", vc, (success ? "succeeded" : "failed"));
+  vector.write_complete(alt_key, vc, success);
+  return *this;
+}
+
+bool
+OpenDirEntry::is_write_active(CacheKey const &alt_key, int64_t offset)
+{
+  return vector.is_write_active(alt_key, offset);
+}
+
+CacheKey const &
+OpenDirEntry::key_for(CacheKey const &alt_key, int64_t offset)
+{
+  return vector.key_for(alt_key, offset);
+}
+
+bool
+OpenDirEntry::wait_for(CacheKey const &alt_key, CacheVC *vc, int64_t offset)
+{
+  Debug("amc", "vc %p waiting for %" PRId64, vc, offset);
+  return vector.wait_for(alt_key, vc, offset);
+}
+
+OpenDirEntry &
+OpenDirEntry::close_writer(CacheKey const &alt_key, CacheVC *vc)
+{
+  vector.close_writer(alt_key, vc);
+  return *this;
+}
 
 //
 // Cache Directory

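Editor's note: OpenDir::open_entry() and close_entry() above replace the old open_write()/close_write() pair. Entries are located by hashing first_key into OPEN_DIR_BUCKETS, shared via the num_active count, created only when force_p is set, and freed when the last user closes. The following is a simplified standalone model of that lifecycle, with hypothetical types and no locking (the real code allocates a ProxyMutex per entry).

// Sketch: hashed buckets of reference-counted open entries.
#include <cstdint>
#include <list>
#include <vector>

struct Entry {
  uint64_t first_key = 0;
  int num_active = 0;
};

class OpenTable
{
public:
  explicit OpenTable(size_t buckets = 256) : bucket_(buckets) {}

  // Return the entry for 'key', creating it only if 'force' is true.
  Entry *
  open_entry(uint64_t key, bool force)
  {
    std::list<Entry> &b = bucket_[key % bucket_.size()];
    for (Entry &e : b) {
      if (e.first_key == key) {
        ++e.num_active;
        return &e;
      }
    }
    if (!force)
      return nullptr;
    Entry e;
    e.first_key  = key;
    e.num_active = 1;
    b.push_back(e);
    return &b.back();
  }

  // Drop one reference; the entry is removed when no users remain.
  void
  close_entry(Entry *entry)
  {
    std::list<Entry> &b = bucket_[entry->first_key % bucket_.size()];
    if (--entry->num_active < 1)
      b.remove_if([&](const Entry &e) { return &e == entry; });
  }

private:
  std::vector<std::list<Entry>> bucket_;
};

A writer would call open_entry(key, true) to create or join the entry, while a reader can pass false and simply gets nullptr when no write is in progress, matching the OpenDir description added to cache-data-structures.en.rst above.
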
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/CacheHttp.cc
----------------------------------------------------------------------
diff --git a/iocore/cache/CacheHttp.cc b/iocore/cache/CacheHttp.cc
index 3d7dfba..0ba989d 100644
--- a/iocore/cache/CacheHttp.cc
+++ b/iocore/cache/CacheHttp.cc
@@ -29,10 +29,10 @@
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
 
-static vec_info default_vec_info;
+// Guaranteed to be all zero?
+static CacheHTTPInfoVector::Item default_vec_info;
 
 #ifdef HTTP_CACHE
-static CacheHTTPInfo default_http_info;
 
 CacheHTTPInfoVector::CacheHTTPInfoVector() : magic(NULL), data(&default_vec_info, 4), xcount(0)
 {
@@ -46,7 +46,7 @@ CacheHTTPInfoVector::~CacheHTTPInfoVector()
   int i;
 
   for (i = 0; i < xcount; i++) {
-    data[i].alternate.destroy();
+    data[i]._alternate.destroy();
   }
   vector_buf.clear();
   magic = NULL;
@@ -61,7 +61,7 @@ CacheHTTPInfoVector::insert(CacheHTTPInfo *info, int index)
   if (index == CACHE_ALT_INDEX_DEFAULT)
     index = xcount++;
 
-  data(index).alternate.copy_shallow(info);
+  data(index)._alternate.copy_shallow(info);
   return index;
 }
 
@@ -77,8 +77,8 @@ CacheHTTPInfoVector::detach(int idx, CacheHTTPInfo *r)
   ink_assert(idx >= 0);
   ink_assert(idx < xcount);
 
-  r->copy_shallow(&data[idx].alternate);
-  data[idx].alternate.destroy();
+  r->copy_shallow(&data[idx]._alternate);
+  data[idx]._alternate.destroy();
 
   for (i = idx; i < (xcount - 1); i++) {
     data[i] = data[i + 1];
@@ -94,7 +94,7 @@ void
 CacheHTTPInfoVector::remove(int idx, bool destroy)
 {
   if (destroy)
-    data[idx].alternate.destroy();
+    data[idx]._alternate.destroy();
 
   for (; idx < (xcount - 1); idx++)
     data[idx] = data[idx + 1];
@@ -112,7 +112,7 @@ CacheHTTPInfoVector::clear(bool destroy)
 
   if (destroy) {
     for (i = 0; i < xcount; i++) {
-      data[i].alternate.destroy();
+      data[i]._alternate.destroy();
     }
   }
   xcount = 0;
@@ -134,14 +134,14 @@ CacheHTTPInfoVector::print(char *buffer, size_t buf_size, bool temps)
   purl = 1;
 
   for (i = 0; i < xcount; i++) {
-    if (data[i].alternate.valid()) {
+    if (data[i]._alternate.valid()) {
       if (purl) {
         Arena arena;
         char *url;
 
         purl = 0;
         URL u;
-        data[i].alternate.request_url_get(&u);
+        data[i]._alternate.request_url_get(&u);
         url = u.string_get(&arena);
         if (url) {
           snprintf(p, buf_size, "[%s] ", url);
@@ -151,8 +151,8 @@ CacheHTTPInfoVector::print(char *buffer, size_t buf_size, bool temps)
         }
       }
 
-      if (temps || !(data[i].alternate.object_key_get() == zero_key)) {
-        snprintf(p, buf_size, "[%d %s]", data[i].alternate.id_get(), CacheKey(data[i].alternate.object_key_get()).toHexStr(buf));
+      if (temps || !(data[i]._alternate.object_key_get() == zero_key)) {
+        snprintf(p, buf_size, "[%d %s]", data[i]._alternate.id_get(), CacheKey(data[i]._alternate.object_key_get()).toHexStr(buf));
         tmp = strlen(p);
         p += tmp;
         buf_size -= tmp;
@@ -170,7 +170,7 @@ CacheHTTPInfoVector::marshal_length()
   int length = 0;
 
   for (int i = 0; i < xcount; i++) {
-    length += data[i].alternate.marshal_length();
+    length += data[i]._alternate.marshal_length();
   }
 
   return length;
@@ -187,7 +187,7 @@ CacheHTTPInfoVector::marshal(char *buf, int length)
   ink_assert(!(((intptr_t)buf) & 3)); // buf must be aligned
 
   for (int i = 0; i < xcount; i++) {
-    int tmp = data[i].alternate.marshal(buf, length);
+    int tmp = data[i]._alternate.marshal(buf, length);
     length -= tmp;
     buf += tmp;
     count++;
@@ -199,8 +199,10 @@ CacheHTTPInfoVector::marshal(char *buf, int length)
   return buf - start;
 }
 
-int
-CacheHTTPInfoVector::unmarshal(const char *buf, int length, RefCountObj *block_ptr)
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+uint32_t
+CacheHTTPInfoVector::get_handles(const char *buf, int length, RefCountObj *block_ptr)
 {
   ink_assert(!(((intptr_t)buf) & 3)); // buf must be aligned
 
@@ -208,50 +210,299 @@ CacheHTTPInfoVector::unmarshal(const char *buf, int length, RefCountObj *block_p
   CacheHTTPInfo info;
   xcount = 0;
 
+  vector_buf = block_ptr;
+
   while (length - (buf - start) > (int)sizeof(HTTPCacheAlt)) {
-    int tmp = HTTPInfo::unmarshal((char *)buf, length - (buf - start), block_ptr);
+    int tmp = info.get_handle((char *)buf, length - (buf - start));
     if (tmp < 0) {
-      return -1;
+      ink_assert(!"CacheHTTPInfoVector::unmarshal get_handle() failed");
+      return (uint32_t)-1;
     }
-    info.m_alt = (HTTPCacheAlt *)buf;
     buf += tmp;
 
-    data(xcount).alternate = info;
+    data(xcount)._alternate = info;
     xcount++;
   }
 
   return ((caddr_t)buf - (caddr_t)start);
 }
 
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+int
+CacheHTTPInfoVector::index_of(CacheKey const &alt_key)
+{
+  int zret;
+  for (zret = 0; zret < xcount && alt_key != data[zret]._alternate.object_key_get(); ++zret)
+    ;
+  return zret < xcount ? zret : -1;
+}
 
 /*-------------------------------------------------------------------------
   -------------------------------------------------------------------------*/
-uint32_t
-CacheHTTPInfoVector::get_handles(const char *buf, int length, RefCountObj *block_ptr)
+
+CacheKey const &
+CacheHTTPInfoVector::key_for(CacheKey const &alt_key, int64_t offset)
 {
-  ink_assert(!(((intptr_t)buf) & 3)); // buf must be aligned
+  int idx = this->index_of(alt_key);
+  Item &item = data[idx];
+  return item._alternate.get_frag_key_of(offset);
+}
 
-  const char *start = buf;
-  CacheHTTPInfo info;
-  xcount = 0;
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
 
-  vector_buf = block_ptr;
+CacheHTTPInfoVector &
+CacheHTTPInfoVector::write_active(CacheKey const &alt_key, CacheVC *vc, int64_t offset)
+{
+  int idx = this->index_of(alt_key);
+  Item &item = data[idx];
 
-  while (length - (buf - start) > (int)sizeof(HTTPCacheAlt)) {
-    int tmp = info.get_handle((char *)buf, length - (buf - start));
-    if (tmp < 0) {
-      ink_assert(!"CacheHTTPInfoVector::unmarshal get_handle() failed");
-      return (uint32_t)-1;
+  Debug("amc", "[CacheHTTPInfoVector::write_active] VC %p write %" PRId64, vc, offset);
+
+  vc->fragment = item._alternate.get_frag_index_of(offset);
+  item._active.push(vc);
+  return *this;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+CacheHTTPInfoVector &
+CacheHTTPInfoVector::write_complete(CacheKey const &alt_key, CacheVC *vc, bool success)
+{
+  int idx = this->index_of(alt_key);
+  Item &item = data[idx];
+  CacheVC *reader;
+
+  Debug("amc", "[CacheHTTPInfoVector::write_complete] VC %p write %s", vc, (success ? "succeeded" : "failed"));
+
+  item._active.remove(vc);
+  if (success)
+    item._alternate.mark_frag_write(vc->fragment);
+
+  // Kick all the waiters, success or fail.
+  while (NULL != (reader = item._waiting.pop())) {
+    Debug("amc", "[write_complete] wake up %p", reader);
+    reader->wake_up_thread->schedule_imm(reader)->cookie = reinterpret_cast<void *>(0x56);
+  }
+
+  return *this;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+bool
+CacheHTTPInfoVector::has_writer(CacheKey const &alt_key)
+{
+  int alt_idx = this->index_of(alt_key);
+  return alt_idx >= 0 && data[alt_idx]._writers.head != NULL;
+}
+
+bool
+CacheHTTPInfoVector::is_write_active(CacheKey const &alt_key, int64_t offset)
+{
+  int alt_idx = this->index_of(alt_key);
+  Item &item = data[alt_idx];
+  int frag_idx = item._alternate.get_frag_index_of(offset);
+  for (CacheVC *vc = item._active.head; vc; vc = item._active.next(vc)) {
+    if (vc->fragment == frag_idx)
+      return true;
+  }
+  return false;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+bool
+CacheHTTPInfoVector::wait_for(CacheKey const &alt_key, CacheVC *vc, int64_t offset)
+{
+  bool zret = true;
+  int alt_idx = this->index_of(alt_key);
+  Item &item = data[alt_idx];
+  int frag_idx = item._alternate.get_frag_index_of(offset);
+  vc->fragment = frag_idx; // really? Shouldn't this already be set?
+  if (item.has_writers()) {
+    if (!item._waiting.in(vc))
+      item._waiting.push(vc);
+  } else {
+    zret = false;
+  }
+  return zret;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+CacheHTTPInfoVector &
+CacheHTTPInfoVector::close_writer(CacheKey const &alt_key, CacheVC *vc)
+{
+  CacheVC *reader;
+  int alt_idx = this->index_of(alt_key);
+  Item &item = data[alt_idx];
+  item._writers.remove(vc);
+  while (NULL != (reader = item._waiting.pop())) {
+    Debug("amc", "[close_writer] wake up %p", reader);
+    reader->wake_up_thread->schedule_imm(reader)->cookie = reinterpret_cast<void *>(0x56);
+  }
+  return *this;
+}
+
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+HTTPRangeSpec::Range
+CacheHTTPInfoVector::get_uncached_hull(CacheKey const &alt_key, HTTPRangeSpec const &req, int64_t initial)
+{
+  int alt_idx = this->index_of(alt_key);
+  Item &item = data[alt_idx];
+  Queue<CacheVC, Link_CacheVC_OpenDir_Link> writers;
+  CacheVC *vc;
+  CacheVC *cycle_vc = NULL;
+  // Yeah, this needs to be tunable.
+  uint64_t DELTA = item._alternate.get_frag_fixed_size() * 16;
+  HTTPRangeSpec::Range r(item._alternate.get_uncached_hull(req, initial));
+
+  if (r.isValid()) {
+    /* Now clip against the writers.
+       We move all the writers to a local list and move them back as we are done using them to clip.
+       This is so we don't skip a potentially valid writer because they are not in start order.
+    */
+    writers.append(item._writers);
+    item._writers.clear();
+    while (r._min < r._max && NULL != (vc = writers.pop())) {
+      uint64_t base = static_cast<uint64_t>(vc->resp_range.getOffset());
+      uint64_t delta = static_cast<uint64_t>(vc->resp_range.getRemnantSize());
+
+      if (base + delta < r._min || base > r._max) {
+        item._writers.push(vc); // of no use to us, just put it back.
+      } else if (base < r._min + DELTA) {
+        r._min = base + delta;     // we can wait, so depend on this writer and clip.
+        item._writers.push(vc);    // we're done with it, put it back.
+        cycle_vc = NULL;           // we did something so clear cycle indicator
+      } else if (vc == cycle_vc) { // we're looping.
+        item._writers.push(vc);    // put this one back.
+        while (NULL != (vc = writers.pop()))
+          item._writers.push(vc); // and the rest.
+      } else {
+        writers.enqueue(vc); // put it back to later checking.
+        if (NULL == cycle_vc)
+          cycle_vc = vc; // but keep an eye out for it coming around again.
+      }
     }
-    buf += tmp;
+  }
+  return r;
+}
 
-    data(xcount).alternate = info;
-    xcount++;
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
+void
+CacheRange::clear()
+{
+  _offset = 0;
+  _idx = -1;
+  _pending_range_shift_p = false;
+  _ct_field = NULL; // need to do real cleanup at some point.
+  _r.clear();
+}
+
+bool
+CacheRange::init(HTTPHdr *req)
+{
+  bool zret = true;
+  MIMEField *rf = req->field_find(MIME_FIELD_RANGE, MIME_LEN_RANGE);
+  if (rf) {
+    int len;
+    char const *val = rf->value_get(&len);
+    zret = _r.parseRangeFieldValue(val, len);
   }
+  return zret;
+}
 
-  return ((caddr_t)buf - (caddr_t)start);
+bool
+CacheRange::start()
+{
+  bool zret = true;
+
+  if (_r.hasRanges()) {
+    _offset = _r[_idx = 0]._min;
+    _pending_range_shift_p = _r.isMulti();
+  } else if (_r.isEmpty()) {
+    _offset = 0;
+  } else {
+    zret = false;
+  }
+  return zret;
+}
+
+bool
+CacheRange::apply(uint64_t len)
+{
+  bool zret = _r.apply(len);
+  if (zret) {
+    _len = len;
+    if (_r.hasRanges()) {
+      _offset = _r[_idx = 0]._min;
+      if (_r.isMulti())
+        _pending_range_shift_p = true;
+    }
+  }
+  return zret;
+}
+
+uint64_t
+CacheRange::consume(uint64_t size)
+{
+  switch (_r._state) {
+  case HTTPRangeSpec::EMPTY:
+    _offset += size;
+    break;
+  case HTTPRangeSpec::SINGLE:
+    _offset += std::min(size, (_r._single._max - _offset) + 1);
+    break;
+  case HTTPRangeSpec::MULTI:
+    ink_assert(_idx < static_cast<int>(_r.count()));
+    // Must not consume more than 1 range or the boundary strings won't get sent.
+    ink_assert(!_pending_range_shift_p);
+    ink_assert(size <= (_r[_idx]._max - _offset) + 1);
+    _offset += size;
+    if (_offset > _r[_idx]._max && ++_idx < static_cast<int>(_r.count())) {
+      _offset = _r[_idx]._min;
+      _pending_range_shift_p = true;
+    }
+    break;
+  default:
+    break;
+  }
+
+  return _offset;
+}
+
+CacheRange &
+CacheRange::generateBoundaryStr(CacheKey const &key)
+{
+  uint64_t rnd = this_ethread()->generator.random();
+  snprintf(_boundary, sizeof(_boundary), "%016" PRIx64 "%016" PRIx64 "..%016" PRIx64, key.slice64(0), key.slice64(1), rnd);
+  // GAH! snprintf null terminates so we can't actually print the last nybble that way and all of
+  // the internal hex converters do the same thing. This is crazy code I need to fix at some point.
+  // It is critical to print every nybble or the content lengths won't add up.
+  _boundary[HTTP_RANGE_BOUNDARY_LEN - 1] = "0123456789abcdef"[rnd & 0xf];
+  return *this;
+}
+
+uint64_t
+CacheRange::calcContentLength() const
+{
+  return _r.calcContentLength(_len, _ct_field ? _ct_field->m_len_value : 0);
 }
 
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
+
 #else // HTTP_CACHE
 
 CacheHTTPInfoVector::CacheHTTPInfoVector() : data(&default_vec_info, 4), xcount(0)
@@ -348,5 +599,7 @@ CacheHTTPInfoVector::get_handles(const char * /* buf ATS_UNUSED */, int /* lengt
   ink_assert(0);
   return 0;
 }
+/*-------------------------------------------------------------------------
+  -------------------------------------------------------------------------*/
 
 #endif // HTTP_CACHE
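
For reference, the writer-clipping loop in get_uncached_hull() above amounts to the following standalone sketch: the uncached hull is shrunk from its low end by any writer that is already producing data close enough to be worth waiting for, and the remaining writers are cycled through until no further progress can be made. Plain structs and a std::deque stand in for the CacheVC queue, and a counter-based cycle check replaces the cycle_vc marker of the original:

// Clip an inclusive uncached range [min, max] against a set of writers that
// are already filling in data. DELTA is the "close enough to wait for" window,
// a tunable as the original comment notes.
#include <cstdint>
#include <deque>

struct WriterSpan {
  uint64_t base;  // first byte the writer will produce
  uint64_t delta; // number of bytes it will produce
};

struct Range {
  uint64_t min, max; // inclusive byte range still uncached
};

Range clip_against_writers(Range r, std::deque<WriterSpan> writers, uint64_t DELTA) {
  size_t since_progress = 0; // writers re-queued since the hull last shrank
  while (r.min < r.max && !writers.empty() && since_progress < writers.size()) {
    WriterSpan w = writers.front();
    writers.pop_front();
    if (w.base + w.delta < r.min || w.base > r.max) {
      // Disjoint from the hull - of no use to us, drop it from consideration.
    } else if (w.base < r.min + DELTA) {
      r.min = w.base + w.delta; // close enough to wait for: depend on it and clip
      since_progress = 0;       // progress was made, start the cycle count over
    } else {
      writers.push_back(w); // may become useful once min advances - recheck later
      ++since_progress;
    }
  }
  return r;
}

The result is the range the reader still has to fetch from the origin once every cooperating writer has been accounted for.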


[6/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/P_CacheHttp.h
----------------------------------------------------------------------
diff --git a/iocore/cache/P_CacheHttp.h b/iocore/cache/P_CacheHttp.h
index c5bf87e..ea0ac43 100644
--- a/iocore/cache/P_CacheHttp.h
+++ b/iocore/cache/P_CacheHttp.h
@@ -34,7 +34,6 @@ typedef URL CacheURL;
 typedef HTTPHdr CacheHTTPHdr;
 typedef HTTPInfo CacheHTTPInfo;
 
-
 #define OFFSET_BITS 24
 enum {
   OWNER_NONE = 0,
@@ -48,11 +47,57 @@ struct CacheHTTPInfo {
 
 #endif // HTTP_CACHE
 
-struct vec_info {
-  CacheHTTPInfo alternate;
-};
+LINK_FORWARD_DECLARATION(CacheVC, OpenDir_Link) // forward declaration
+LINK_FORWARD_DECLARATION(CacheVC, Active_Link)  // forward declaration
 
 struct CacheHTTPInfoVector {
+  typedef CacheHTTPInfoVector self; ///< Self reference type.
+
+  struct Item {
+    /// Descriptor for an alternate for this object.
+    CacheHTTPInfo _alternate;
+    /// CacheVCs which are writing data to this alternate.
+    DLL<CacheVC, Link_CacheVC_OpenDir_Link> _writers;
+    ///@{ Active I/O
+    /** These two lists track active / outstanding I/O operations. The @a _active list is for writers:
+        a CacheVC should be on this list iff it has initiated an I/O that has not yet
+        completed. The @a _waiting list is for CacheVCs that are waiting for a fragment that is being written
+        by a CacheVC on the @a _active list. That is, each is waiting on the same I/O operation as an @a _active
+        CacheVC.
+
+        @internal An alternative implementation would be an array with an element for each fragment. With
+        the list scheme we have to do a linear search of these lists to find the corresponding fragment I/O, if any.
+        However, these lists should be short (only very rarely more than 1 or 2 entries) and an array, given the ever
+        larger objects to be stored, would be large and require allocation. For these reasons I think the lists are the
+        better choice.
+    */
+    /// CacheVCs with pending write I/O.
+    DLL<CacheVC, Link_CacheVC_Active_Link> _active;
+    /// CacheVCs waiting on fragments.
+    DLL<CacheVC, Link_CacheVC_Active_Link> _waiting;
+    // To minimize list walking, we track the convex hull of fragments for which readers are waiting.
+    // We update the values whenever we must actually walk the list.
+    // Otherwise we maintain the convex hull invariant so if a written fragment is outside the range,
+    // we can assume no reader was waiting for it.
+    /// lowest fragment index for which a reader is waiting.
+    int _wait_idx_min;
+    /// highest fragment index for which a reader is waiting.
+    int _wait_idx_max;
+    /// Flag
+    union {
+      uint16_t _flags;
+      struct {
+        unsigned int dirty : 1;
+      } f;
+    };
+    ///@}
+    /// Check if there are any writers.
+    /// @internal Need to augment this at some point to check for writers to a specific offset.
+    bool has_writers() const;
+  };
+
+  typedef CacheArray<Item> InfoVector;
+
   void *magic;
 
   CacheHTTPInfoVector();
@@ -63,6 +108,7 @@ struct CacheHTTPInfoVector {
   {
     return xcount;
   }
+
   int insert(CacheHTTPInfo *info, int id = -1);
   CacheHTTPInfo *get(int idx);
   void detach(int idx, CacheHTTPInfo *r);
@@ -79,19 +125,246 @@ struct CacheHTTPInfoVector {
   int marshal_length();
   int marshal(char *buf, int length);
   uint32_t get_handles(const char *buf, int length, RefCountObj *block_ptr = NULL);
-  int unmarshal(const char *buf, int length, RefCountObj *block_ptr);
 
-  CacheArray<vec_info> data;
+  /// Get the alternate index for the @a key.
+  int index_of(CacheKey const &key);
+  /// Check if there are any writers for the alternate of @a alt_key.
+  bool has_writer(CacheKey const &alt_key);
+  /// Mark a @c CacheVC as actively writing at @a offset on the alternate with @a alt_key.
+  self &write_active(CacheKey const &alt_key, CacheVC *vc, int64_t offset);
+  /// Mark an active write by @a vc as complete and indicate whether it had @a success.
+  /// If the write is not @a success then the fragment is not marked as cached.
+  self &write_complete(CacheKey const &alt_key, CacheVC *vc, bool success = true);
+  /// Indicate if a VC is currently writing to the fragment with this @a offset.
+  bool is_write_active(CacheKey const &alt_key, int64_t offset);
+  /// Mark a CacheVC as waiting for the fragment containing the byte at @a offset.
+  /// @return @c false if there is no writer scheduled to write that offset.
+  bool wait_for(CacheKey const &alt_key, CacheVC *vc, int64_t offset);
+  /// Get the fragment key for a specific @a offset.
+  CacheKey const &key_for(CacheKey const &alt_key, int64_t offset);
+  /// Close out anything related to this writer
+  self &close_writer(CacheKey const &alt_key, CacheVC *vc);
+  /** Compute the convex hull of the uncached parts of the @a request taking current writers in to account.
+
+      @return @c true if there is uncached data that must be retrieved.
+   */
+  HTTPRangeSpec::Range get_uncached_hull(CacheKey const &alt_key, HTTPRangeSpec const &request, int64_t initial);
+
+  /** Sigh, yet another custom array class.
+      @c Vec doesn't work because it really only works well with pointers, not objects.
+  */
+  InfoVector data;
+
   int xcount;
   Ptr<RefCountObj> vector_buf;
 };
 
+/** Range operation tracking.
+
+    This holds a range specification. It also tracks the current object offset and the individual range
+    currently being served.
+
+    To simplify the logic that uses this class, an empty specification pretends to be a single range
+    covering the full object size. To return the correct response we still need to distinguish
+    those two cases.
+*/
+class CacheRange
+{
+public:
+  typedef CacheRange self; ///< Self reference type.
+
+  /// Default constructor
+  CacheRange() : _offset(0), _idx(-1), _ct_field(NULL), _pending_range_shift_p(false) {}
+
+  /// Test if the range spec has actual ranges in it
+  bool hasRanges() const;
+
+  /// Test for multiple ranges.
+  bool isMulti() const;
+
+  /// Get the current object offset
+  uint64_t getOffset() const;
+
+  /// Get the current range index.
+  int getIdx() const;
+
+  /// Get the number of ranges.
+  size_t count() const;
+
+  /// Get the remaining contiguous bytes for the current range.
+  uint64_t getRemnantSize() const;
+
+  /** Advance @a size bytes in the range spec.
+
+      @return The resulting offset in the object.
+  */
+  uint64_t consume(uint64_t size);
+
+  /** Initialize from a request header.
+   */
+  bool init(HTTPHdr *req);
+
+  /** Set the range to the start of the range set.
+      @return @c true if there is a valid range, @c false otherwise.
+  */
+  bool start();
+
+  /** Apply a content @a len to the ranges.
+
+      @return @c true if successfully applied, @c false otherwise.
+  */
+  bool apply(uint64_t len);
+
+  /** Get the range boundary string.
+      @a len if not @c NULL receives the length of the string.
+  */
+  char const *getBoundaryStr(int *len) const;
+
+  /** Generate the range boundary string */
+  self &generateBoundaryStr(CacheKey const &key);
+
+  /// Get the cached Content-Type field.
+  MIMEField *getContentTypeField() const;
+
+  /// Set the Content-Type field from a response header.
+  self &setContentTypeFromResponse(HTTPHdr *resp);
+
+  /** Calculate the effective HTTP content length value.
+   */
+  uint64_t calcContentLength() const;
+
+  /// Raw access to internal range spec.
+  HTTPRangeSpec &getRangeSpec();
+
+  /// Test if a consume moved across a range boundary.
+  bool hasPendingRangeShift() const;
+
+  /// Clear the pending range shift flag.
+  self &consumeRangeShift();
+
+  /// Range access.
+  HTTPRangeSpec::Range &operator[](int n);
+
+  /// Range access.
+  HTTPRangeSpec::Range const &operator[](int n) const;
+
+  /// Reset to re-usable state.
+  void clear();
+
+protected:
+  uint64_t _len;        ///< Total object length.
+  uint64_t _offset;     ///< Offset in content.
+  int _idx;             ///< Current range index. (< 0 means not in a range)
+  HTTPRangeSpec _r;     ///< The actual ranges.
+  MIMEField *_ct_field; ///< Content-Type field.
+  char _boundary[HTTP_RANGE_BOUNDARY_LEN];
+  bool _pending_range_shift_p;
+};
+
+TS_INLINE bool
+CacheHTTPInfoVector::Item::has_writers() const
+{
+  return NULL != _writers.head;
+}
+
 TS_INLINE CacheHTTPInfo *
 CacheHTTPInfoVector::get(int idx)
 {
   ink_assert(idx >= 0);
   ink_assert(idx < xcount);
-  return &data[idx].alternate;
+  return &data[idx]._alternate;
+}
+
+inline bool
+CacheRange::hasRanges() const
+{
+  return _r.isSingle() || _r.isMulti();
+}
+
+inline uint64_t
+CacheRange::getOffset() const
+{
+  return _offset;
+}
+
+inline int
+CacheRange::getIdx() const
+{
+  return _idx;
+}
+
+inline uint64_t
+CacheRange::getRemnantSize() const
+{
+  uint64_t zret = 0;
+
+  if (_r.isEmpty())
+    zret = _len - _offset;
+  else if (_r.isValid() && 0 <= _idx && _idx < static_cast<int>(_r.count()))
+    zret = (_r[_idx]._max - _offset) + 1;
+
+  return zret;
+}
+
+inline char const *
+CacheRange::getBoundaryStr(int *len) const
+{
+  if (len)
+    *len = sizeof(_boundary);
+  return _boundary;
+}
+
+inline HTTPRangeSpec &
+CacheRange::getRangeSpec()
+{
+  return _r;
+}
+
+inline bool
+CacheRange::isMulti() const
+{
+  return _r.isMulti();
+}
+
+inline bool
+CacheRange::hasPendingRangeShift() const
+{
+  return _pending_range_shift_p;
+}
+
+inline CacheRange &
+CacheRange::consumeRangeShift()
+{
+  _pending_range_shift_p = false;
+  return *this;
+}
+
+inline MIMEField *
+CacheRange::getContentTypeField() const
+{
+  return _ct_field;
+}
+
+inline size_t
+CacheRange::count() const
+{
+  return _r.count();
+}
+
+inline HTTPRangeSpec::Range &CacheRange::operator[](int n)
+{
+  return _r[n];
+}
+
+inline HTTPRangeSpec::Range const &CacheRange::operator[](int n) const
+{
+  return _r[n];
+}
+
+inline CacheRange &
+CacheRange::setContentTypeFromResponse(HTTPHdr *resp)
+{
+  _ct_field = resp->field_find(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
+  return *this;
 }
 
 #endif /* __CACHE_HTTP_H__ */
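
As a reading aid for the CacheRange interface above, here is a simplified standalone model of the offset tracking that CacheRange::start() and CacheRange::consume() implement in CacheHttp.cc. It covers only the multi-range case; a std::vector of inclusive pairs stands in for HTTPRangeSpec and the names are illustrative:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct RangeModel {
  std::vector<std::pair<uint64_t, uint64_t>> ranges; // inclusive [min, max] pairs
  uint64_t offset = 0;                               // current object offset
  int idx = -1;                                      // index of the range being served
  bool pending_shift = false;                        // crossed into a new range?

  // Position at the start of the first range (mirrors CacheRange::start()).
  // Assumes at least one range is present.
  void start() {
    idx = 0;
    offset = ranges[idx].first;
    pending_shift = ranges.size() > 1; // multipart: a boundary precedes the first part
  }

  // Advance by @a size bytes within the current range and return the new object
  // offset (mirrors the MULTI case of CacheRange::consume()).
  uint64_t consume(uint64_t size) {
    assert(idx >= 0 && idx < static_cast<int>(ranges.size()));
    assert(!pending_shift); // the caller must emit the boundary first
    assert(size <= (ranges[idx].second - offset) + 1);
    offset += size;
    if (offset > ranges[idx].second && ++idx < static_cast<int>(ranges.size())) {
      offset = ranges[idx].first; // jump to the start of the next range
      pending_shift = true;       // caller must emit a boundary before more data
    }
    return offset;
  }
};

Usage mirrors the VC: after start(), and again whenever consume() sets the pending flag, the caller emits a multipart boundary and clears the flag (consumeRangeShift() in the real class) before consuming more data.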

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/P_CacheInternal.h
----------------------------------------------------------------------
diff --git a/iocore/cache/P_CacheInternal.h b/iocore/cache/P_CacheInternal.h
index 28cb44f..e215903 100644
--- a/iocore/cache/P_CacheInternal.h
+++ b/iocore/cache/P_CacheInternal.h
@@ -225,6 +225,10 @@ extern int cache_config_read_while_writer_max_retries;
 
 // CacheVC
 struct CacheVC : public CacheVConnection {
+  typedef CacheVC self;                                                  ///< Self reference type.
+  typedef HTTPCacheAlt::FragmentDescriptor FragmentDescriptor;           ///< Import type.
+  typedef HTTPCacheAlt::FragmentDescriptorTable FragmentDescriptorTable; ///< Import type.
+
   CacheVC();
 
   VIO *do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf);
@@ -274,7 +278,9 @@ struct CacheVC : public CacheVConnection {
     return -1;
   }
 
-  bool writer_done();
+  Action *do_write_init();
+
+  //  bool writer_done();
   int calluser(int event);
   int callcont(int event);
   int die();
@@ -294,6 +300,7 @@ struct CacheVC : public CacheVConnection {
   int openReadReadDone(int event, Event *e);
   int openReadMain(int event, Event *e);
   int openReadStartEarliest(int event, Event *e);
+  int openReadWaitEarliest(int evid, Event *e);
 #ifdef HTTP_CACHE
   int openReadVecWrite(int event, Event *e);
 #endif
@@ -301,7 +308,7 @@ struct CacheVC : public CacheVConnection {
   int openReadFromWriter(int event, Event *e);
   int openReadFromWriterMain(int event, Event *e);
   int openReadFromWriterFailure(int event, Event *);
-  int openReadChooseWriter(int event, Event *e);
+  //  int openReadChooseWriter(int event, Event *e);
 
   int openWriteCloseDir(int event, Event *e);
   int openWriteCloseHeadDone(int event, Event *e);
@@ -311,13 +318,18 @@ struct CacheVC : public CacheVConnection {
   int openWriteRemoveVector(int event, Event *e);
   int openWriteWriteDone(int event, Event *e);
   int openWriteOverwrite(int event, Event *e);
+  int openWriteInit(int event, Event *e);
   int openWriteMain(int event, Event *e);
   int openWriteStartDone(int event, Event *e);
   int openWriteStartBegin(int event, Event *e);
+  int openWriteEmptyEarliestDone(int event, Event *e);
 
   int updateVector(int event, Event *e);
   int updateReadDone(int event, Event *e);
   int updateVecWrite(int event, Event *e);
+  int updateWriteStateFromRange();
+
+  int closeReadAndFree(int event, Event *e);
 
   int removeEvent(int event, Event *e);
 
@@ -357,12 +369,35 @@ struct CacheVC : public CacheVConnection {
       @return The address of the start of the fragment table,
       or @c NULL if there is no fragment table.
   */
-  virtual HTTPInfo::FragOffset *get_frag_table();
+  virtual HTTPInfo::FragmentDescriptorTable *get_frag_table();
   /** Load alt pointers and do fixups if needed.
       @return Length of header data used for alternates.
    */
   virtual uint32_t load_http_info(CacheHTTPInfoVector *info, struct Doc *doc, RefCountObj *block_ptr = NULL);
+
+  /// Change member @a key to be the key for the @a idx 'th fragment.
+  void update_key_to_frag_idx(int idx);
+  /// Compute the index of the fragment that contains the byte at content location @a offset.
+  int frag_idx_for_offset(uint64_t offset);
+
+  virtual char const *get_http_range_boundary_string(int *len) const;
+  virtual int64_t get_effective_content_size();
+  virtual void set_full_content_length(int64_t size);
+  virtual bool get_uncached(HTTPRangeSpec const &req, HTTPRangeSpec &result, int64_t initial);
+  /** This sets a range for data flowing in to the cache VC.
+      The CacheVC will write the incoming data to this part of the overall object.
+      @internal It's done this way to isolate the CacheVC from parsing range separators
+      in multi-range responses.
+  */
+  virtual int64_t set_inbound_range(int64_t min, int64_t max);
+  /** Select the ranges to apply to the content.
+      @internal In this case the CacheVC has to know the entire set of ranges so it can correctly
+      compute the actual output size (vs. the content size).
+  */
+  virtual void set_content_range(HTTPRangeSpec const &range);
+
 #endif
+
   virtual bool is_pread_capable();
   virtual bool set_pin_in_cache(time_t time_pin);
   virtual time_t get_pin_in_cache();
@@ -386,6 +421,9 @@ struct CacheVC : public CacheVConnection {
   // before being used by the CacheVC
   CacheKey key, first_key, earliest_key, update_key;
   Dir dir, earliest_dir, overwrite_dir, first_dir;
+  /// Thread to use to wake up this VC. Set when the VC puts itself on a wait list.
+  /// The waker should schedule @c EVENT_IMMEDIATE on this thread to wake up this VC.
+  EThread *wake_up_thread;
   // end Region A
 
   // Start Region B
@@ -405,11 +443,14 @@ struct CacheVC : public CacheVConnection {
 
   OpenDirEntry *od;
   AIOCallbackInternal io;
-  int alternate_index; // preferred position in vector
-  LINK(CacheVC, opendir_link);
+  int alternate_index;         // preferred position in vector
+  LINK(CacheVC, OpenDir_Link); ///< Reader/writer link per alternate in @c OpenDir.
+  LINK(CacheVC, Active_Link);  ///< Active I/O pending list in @c OpenDir.
 #ifdef CACHE_STAT_PAGES
   LINK(CacheVC, stat_link);
 #endif
+  CacheRange resp_range; ///< Tracking information for range data for response.
+  //  CacheRange uncached_range;      ///< The ranges in the request that are not in cache.
   // end Region B
 
   // Start Region C
@@ -451,6 +492,12 @@ struct CacheVC : public CacheVConnection {
   uint64_t total_len;    // total length written and available to write
   uint64_t doc_len;      // total_length (of the selected alternate for HTTP)
   uint64_t update_len;
+  HTTPRangeSpec::Range write_range; ///< Object based range for incoming partial content.
+  /// The offset in the content of the first byte beyond the end of the current fragment.
+  /// @internal This seems very weird but I couldn't figure out how to keep the more sensible
+  /// lower bound correctly updated.
+  /// The lower bound can be computed by subtracting doc->len from this value.
+  uint64_t frag_upper_bound;
   int fragment;
   int scan_msec_delay;
   CacheVC *write_vc;
@@ -522,7 +569,10 @@ extern CacheSync *cacheDirSync;
 // Function Prototypes
 #ifdef HTTP_CACHE
 int cache_write(CacheVC *, CacheHTTPInfoVector *);
-int get_alternate_index(CacheHTTPInfoVector *cache_vector, CacheKey key);
+/// Get the index for the alternate identified by @a key in @a cache_vector.
+/// @a idx is a hint - that index is checked first and if not there the vector is scanned.
+/// This makes repeated access faster if the vector is not being updated.
+int get_alternate_index(CacheHTTPInfoVector *cache_vector, CacheKey key, int idx = -1);
 #endif
 CacheVC *new_DocEvacuator(int nbytes, Vol *d);
 
@@ -598,6 +648,7 @@ free_CacheVC(CacheVC *cont)
   cont->alternate_index = CACHE_ALT_INDEX_DEFAULT;
   if (cont->scan_vol_map)
     ats_free(cont->scan_vol_map);
+  cont->resp_range.clear();
   memset((char *)&cont->vio, 0, cont->size_to_init);
 #ifdef CACHE_STAT_PAGES
   ink_assert(!cont->stat_link.next && !cont->stat_link.prev);
@@ -723,6 +774,7 @@ CacheVC::do_write_lock_call()
   return handleWriteLock(EVENT_CALL, 0);
 }
 
+#if 0
 TS_INLINE bool
 CacheVC::writer_done()
 {
@@ -740,8 +792,9 @@ CacheVC::writer_done()
     return true;
   return false;
 }
+#endif
 
-TS_INLINE int
+TS_INLINE void
 Vol::close_write(CacheVC *cont)
 {
 #ifdef CACHE_STAT_PAGES
@@ -749,12 +802,12 @@ Vol::close_write(CacheVC *cont)
   stat_cache_vcs.remove(cont, cont->stat_link);
   ink_assert(!cont->stat_link.next && !cont->stat_link.prev);
 #endif
-  return open_dir.close_write(cont);
+  open_dir.close_entry(cont);
 }
 
 // Returns 0 on success or a positive error code on failure
 TS_INLINE int
-Vol::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
+Vol::open_write(CacheVC *cont)
 {
   Vol *vol = this;
   bool agg_error = false;
@@ -768,7 +821,9 @@ Vol::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
     CACHE_INCREMENT_DYN_STAT(cache_write_backlog_failure_stat);
     return ECACHE_WRITE_FAIL;
   }
-  if (open_dir.open_write(cont, allow_if_writers, max_writers)) {
+  ink_assert(NULL == cont->od);
+  if (NULL != (cont->od = open_dir.open_entry(this, cont->first_key, true))) {
+    cont->write_vector = &cont->od->vector;
 #ifdef CACHE_STAT_PAGES
     ink_assert(cont->mutex->thread_holding == this_ethread());
     ink_assert(!cont->stat_link.next && !cont->stat_link.prev);
@@ -786,26 +841,23 @@ Vol::close_write_lock(CacheVC *cont)
   CACHE_TRY_LOCK(lock, mutex, t);
   if (!lock.is_locked())
     return -1;
-  return close_write(cont);
+  this->close_write(cont);
+  return 0;
 }
 
 TS_INLINE int
-Vol::open_write_lock(CacheVC *cont, int allow_if_writers, int max_writers)
+Vol::open_write_lock(CacheVC *cont)
 {
   EThread *t = cont->mutex->thread_holding;
   CACHE_TRY_LOCK(lock, mutex, t);
-  if (!lock.is_locked())
-    return -1;
-  return open_write(cont, allow_if_writers, max_writers);
+  return lock.is_locked() ? this->open_write(cont) : -1;
 }
 
 TS_INLINE OpenDirEntry *
 Vol::open_read_lock(INK_MD5 *key, EThread *t)
 {
   CACHE_TRY_LOCK(lock, mutex, t);
-  if (!lock.is_locked())
-    return NULL;
-  return open_dir.open_read(key);
+  return lock.is_locked() ? open_dir.open_entry(this, *key, false) : NULL;
 }
 
 TS_INLINE int
@@ -871,6 +923,18 @@ rand_CacheKey(CacheKey *next_key, ProxyMutex *mutex)
   next_key->b[1] = mutex->thread_holding->generator.random();
 }
 
+#if 1
+void TS_INLINE
+next_CacheKey(CacheKey *next_key, CacheKey *key)
+{
+  next_key->next(*key);
+}
+void TS_INLINE
+prev_CacheKey(CacheKey *prev_key, CacheKey *key)
+{
+  prev_key->prev(*key);
+}
+#else
 extern uint8_t CacheKey_next_table[];
 void TS_INLINE
 next_CacheKey(CacheKey *next_key, CacheKey *key)
@@ -891,6 +955,7 @@ prev_CacheKey(CacheKey *prev_key, CacheKey *key)
     b[i] = 256 + CacheKey_prev_table[k[i]] - k[i - 1];
   b[0] = CacheKey_prev_table[k[0]];
 }
+#endif
 
 TS_INLINE unsigned int
 next_rand(unsigned int *p)
@@ -959,6 +1024,7 @@ struct Cache {
 
   Action *lookup(Continuation *cont, const CacheKey *key, CacheFragType type, const char *hostname, int host_len);
   inkcoreapi Action *open_read(Continuation *cont, const CacheKey *key, CacheFragType type, const char *hostname, int len);
+  inkcoreapi Action *open_read(Continuation *cont, CacheVConnection* writer, HTTPHdr* client_request);
   inkcoreapi Action *open_write(Continuation *cont, const CacheKey *key, CacheFragType frag_type, int options = 0,
                                 time_t pin_in_cache = (time_t)0, const char *hostname = 0, int host_len = 0);
   inkcoreapi Action *remove(Continuation *cont, const CacheKey *key, CacheFragType type = CACHE_FRAG_TYPE_HTTP,
@@ -1033,6 +1099,7 @@ cache_hash(const INK_MD5 &md5)
 #include "P_ClusterInline.h"
 #endif
 
-LINK_DEFINITION(CacheVC, opendir_link)
+LINK_DEFINITION(CacheVC, OpenDir_Link)
+LINK_DEFINITION(CacheVC, Active_Link)
 
 #endif /* _P_CACHE_INTERNAL_H__ */
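
The hint parameter added to get_alternate_index() boils down to this pattern; a standalone sketch with a std::vector of integer keys standing in for the alternate vector and CacheKey:

#include <cstdint>
#include <vector>

// Return the index of @a key in @a keys, checking the cached @a hint first and
// falling back to a scan only when the hint no longer matches.
int find_with_hint(std::vector<uint64_t> const &keys, uint64_t key, int hint = -1) {
  if (0 <= hint && hint < static_cast<int>(keys.size()) && keys[hint] == key)
    return hint; // hint still valid - no scan needed
  for (int i = 0; i < static_cast<int>(keys.size()); ++i)
    if (keys[i] == key)
      return i;
  return -1; // not present
}

A caller that caches the returned index (as the alternate_index "preferred position" member above suggests) skips the scan on every access while the vector is not being updated.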

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cache/P_CacheVol.h
----------------------------------------------------------------------
diff --git a/iocore/cache/P_CacheVol.h b/iocore/cache/P_CacheVol.h
index 10eddc6..5b5e530 100644
--- a/iocore/cache/P_CacheVol.h
+++ b/iocore/cache/P_CacheVol.h
@@ -184,10 +184,10 @@ struct Vol : public Continuation {
 
   int recover_data();
 
-  int open_write(CacheVC *cont, int allow_if_writers, int max_writers);
-  int open_write_lock(CacheVC *cont, int allow_if_writers, int max_writers);
-  int close_write(CacheVC *cont);
-  int close_write_lock(CacheVC *cont);
+  int open_write(CacheVC *cont);
+  int open_write_lock(CacheVC *cont);
+  void close_write(CacheVC *cont);
+  int close_write_lock(CacheVC *cont); // can fail lock
   int begin_read(CacheVC *cont);
   int begin_read_lock(CacheVC *cont);
   // unused read-write interlock code
@@ -482,7 +482,7 @@ free_EvacuationBlock(EvacuationBlock *b, EThread *t)
 TS_INLINE OpenDirEntry *
 Vol::open_read(const CryptoHash *key)
 {
-  return open_dir.open_read(key);
+  return open_dir.open_entry(this, *key, false);
 }
 
 TS_INLINE int

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cluster/ClusterCache.cc
----------------------------------------------------------------------
diff --git a/iocore/cluster/ClusterCache.cc b/iocore/cluster/ClusterCache.cc
index c4480e1..534703d 100644
--- a/iocore/cluster/ClusterCache.cc
+++ b/iocore/cluster/ClusterCache.cc
@@ -2624,7 +2624,7 @@ CacheContinuation::getObjectSize(VConnection *vc, int opcode, CacheHTTPInfo *ret
     } else {
       new_ci.object_size_set(object_size);
     }
-    new_ci.m_alt->m_writeable = 1;
+    new_ci.m_alt->m_flag.writeable_p = true;
     ret_ci->copy_shallow(&new_ci);
   }
   ink_release_assert(object_size);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cluster/ClusterVConnection.cc
----------------------------------------------------------------------
diff --git a/iocore/cluster/ClusterVConnection.cc b/iocore/cluster/ClusterVConnection.cc
index 7a3185e..d831b39 100644
--- a/iocore/cluster/ClusterVConnection.cc
+++ b/iocore/cluster/ClusterVConnection.cc
@@ -631,4 +631,5 @@ ClusterVConnection::get_disk_io_priority()
   return disk_io_priority;
 }
 
+
 // End of ClusterVConnection.cc

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cluster/P_Cluster.h
----------------------------------------------------------------------
diff --git a/iocore/cluster/P_Cluster.h b/iocore/cluster/P_Cluster.h
index 0376f63..b82f2e0 100644
--- a/iocore/cluster/P_Cluster.h
+++ b/iocore/cluster/P_Cluster.h
@@ -46,7 +46,7 @@
 #include "P_ClusterLoadMonitor.h"
 #include "P_TimeTrace.h"
 
-
+#if 0 // defined in InkErrno.h
 #define ECLUSTER_NO_VC (CLUSTER_ERRNO + 0)
 #define ECLUSTER_NO_MACHINE (CLUSTER_ERRNO + 1)
 #define ECLUSTER_OP_TIMEOUT (CLUSTER_ERRNO + 2)
@@ -54,6 +54,7 @@
 #define ECLUSTER_ORB_EIO (CLUSTER_ERRNO + 4)
 #define ECLUSTER_CHANNEL_INUSE (CLUSTER_ERRNO + 5)
 #define ECLUSTER_NOMORE_CHANNELS (CLUSTER_ERRNO + 6)
+#endif
 
 int init_clusterprocessor(void);
 enum {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/iocore/cluster/P_ClusterCache.h
----------------------------------------------------------------------
diff --git a/iocore/cluster/P_ClusterCache.h b/iocore/cluster/P_ClusterCache.h
index 37f49e1..e68ce8f 100644
--- a/iocore/cluster/P_ClusterCache.h
+++ b/iocore/cluster/P_ClusterCache.h
@@ -357,6 +357,27 @@ struct ClusterVConnectionBase : public CacheVConnection {
   virtual void do_io_close(int lerrno = -1);
   virtual VIO *do_io_pread(Continuation *, int64_t, MIOBuffer *, int64_t);
 
+  // TODO - fix these to work for cluster.
+  // I think the best approach is to foist the work off to the source peer and have it do
+  // the range formatting which we then just pass through. For now, this just prevents
+  // link problems so I can get the base case to work.
+  virtual void
+  set_content_range(HTTPRangeSpec const &)
+  {
+    return;
+  }
+  virtual char const *
+  get_http_range_boundary_string(int *) const
+  {
+    return NULL;
+  }
+  virtual int64_t
+  get_effective_content_size()
+  {
+    return this->get_object_size();
+  }
+  virtual void set_full_content_length(int64_t) {} // only used when writing to cache
+
   // Set the timeouts associated with this connection.
   // active_timeout is for the total elasped time of the connection.
   // inactivity_timeout is the elapsed time *while an operation was
@@ -388,6 +409,7 @@ struct ClusterVConnectionBase : public CacheVConnection {
   ink_hrtime active_timeout_in;
   Event *inactivity_timeout;
   Event *active_timeout;
+  CacheRange resp_range;
 
   virtual void reenable(VIO *);
   virtual void reenable_re(VIO *);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/CryptoHash.h
----------------------------------------------------------------------
diff --git a/lib/ts/CryptoHash.h b/lib/ts/CryptoHash.h
index 28b6296..316d9f1 100644
--- a/lib/ts/CryptoHash.h
+++ b/lib/ts/CryptoHash.h
@@ -26,8 +26,15 @@
 /// Apache Traffic Server commons.
 namespace ats
 {
+/// Permutation table for computing next hash.
+extern uint8_t const CRYPTO_HASH_NEXT_TABLE[256];
+/// Permutation table for computing previous hash.
+extern uint8_t const CRYPTO_HASH_PREV_TABLE[256];
+
 /// Crypto hash output.
 union CryptoHash {
+  typedef CryptoHash self; ///< Self reference type.
+
   uint64_t b[2]; // Legacy placeholder
   uint64_t u64[2];
   uint32_t u32[4];
@@ -51,7 +58,7 @@ union CryptoHash {
   /// Equality - bitwise identical.
   bool operator==(CryptoHash const &that) const { return u64[0] == that.u64[0] && u64[1] == that.u64[1]; }
 
-  /// Equality - bitwise identical.
+  /// Inequality - bitwise identical.
   bool operator!=(CryptoHash const &that) const { return !(*this == that); }
 
   /// Reduce to 64 bit value.
@@ -85,6 +92,44 @@ union CryptoHash {
   {
     return ink_code_to_hex_str(buffer, u8);
   }
+
+  /// Check for the zero key.
+  bool
+  is_zero() const
+  {
+    return 0 == (u64[0] | u64[1]);
+  }
+
+  /// Update the key to the computationally chained next key from @a that.
+  void
+  next(self const &that)
+  {
+    u8[0] = CRYPTO_HASH_NEXT_TABLE[that.u8[0]];
+    for (unsigned int i = 1; i < sizeof(u8); ++i)
+      u8[i] = CRYPTO_HASH_NEXT_TABLE[(u8[i - 1] + that.u8[i]) & 0xFF];
+  }
+  /// Update the key to the computationally chained next key.
+  void
+  next()
+  {
+    this->next(*this);
+  }
+
+  /// Update the key to the computationally chained previous key from @a that.
+  void
+  prev(self const &that)
+  {
+    for (unsigned int i = sizeof(u8) - 1; i > 0; --i)
+      u8[i] = 256 + CRYPTO_HASH_PREV_TABLE[that.u8[i]] - that.u8[i - 1];
+    u8[0] = CRYPTO_HASH_PREV_TABLE[that.u8[0]];
+  }
+
+  /// Update the key to the computationally chained previous key.
+  void
+  prev()
+  {
+    this->prev(*this);
+  }
 };
 
 extern CryptoHash const CRYPTO_HASH_ZERO;
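
The next()/prev() pair above is what makes fragment keys a computable chain: each key is derived from its predecessor through a byte permutation with a carry from the previous output byte, and the inverse table walks the chain backwards. A standalone round-trip check of the two recurrences, using an arbitrary affine permutation rather than the real CRYPTO_HASH tables:

#include <cassert>
#include <cstddef>
#include <cstdint>

static uint8_t NEXT[256], PREV[256];

static void build_tables() {
  for (int i = 0; i < 256; ++i)
    NEXT[i] = static_cast<uint8_t>(i * 167 + 13); // any byte permutation works
  for (int i = 0; i < 256; ++i)
    PREV[NEXT[i]] = static_cast<uint8_t>(i); // inverse permutation
}

// Mirrors CryptoHash::next(that): out[0] = T[in[0]], out[i] = T[(out[i-1] + in[i]) & 0xFF].
static void next_key(uint8_t out[16], uint8_t const in[16]) {
  out[0] = NEXT[in[0]];
  for (size_t i = 1; i < 16; ++i)
    out[i] = NEXT[(out[i - 1] + in[i]) & 0xFF];
}

// Mirrors CryptoHash::prev(that): out[i] = P[in[i]] - in[i-1] (mod 256), out[0] = P[in[0]].
static void prev_key(uint8_t out[16], uint8_t const in[16]) {
  for (size_t i = 15; i > 0; --i)
    out[i] = static_cast<uint8_t>(256 + PREV[in[i]] - in[i - 1]);
  out[0] = PREV[in[0]];
}

int main() {
  build_tables();
  uint8_t key[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  uint8_t fwd[16], back[16];
  next_key(fwd, key);  // key of the "next" fragment in the chain
  prev_key(back, fwd); // walking back recovers the original key
  for (size_t i = 0; i < 16; ++i)
    assert(back[i] == key[i]);
  return 0;
}

next_CacheKey() and prev_CacheKey() in P_CacheInternal.h (earlier in this patch) now delegate to these methods instead of the file-local tables.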

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/InkErrno.h
----------------------------------------------------------------------
diff --git a/lib/ts/InkErrno.h b/lib/ts/InkErrno.h
index f3f796a..3915d4e 100644
--- a/lib/ts/InkErrno.h
+++ b/lib/ts/InkErrno.h
@@ -66,6 +66,8 @@
 #define ECACHE_NOT_READY (CACHE_ERRNO + 7)
 #define ECACHE_ALT_MISS (CACHE_ERRNO + 8)
 #define ECACHE_BAD_READ_REQUEST (CACHE_ERRNO + 9)
+#define ECACHE_INVALID_RANGE (CACHE_ERRNO + 10)
+#define ECACHE_UNSATISFIABLE_RANGE (CACHE_ERRNO + 11)
 
 #define EHTTP_ERROR (HTTP_ERRNO + 0)
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/ParseRules.cc
----------------------------------------------------------------------
diff --git a/lib/ts/ParseRules.cc b/lib/ts/ParseRules.cc
index 5864e70..a1885dd 100644
--- a/lib/ts/ParseRules.cc
+++ b/lib/ts/ParseRules.cc
@@ -306,3 +306,21 @@ ink_atoi64(const char *str, int len)
   }
   return num;
 }
+
+uint64_t
+ats_strto64(char const *s, size_t len, size_t *used)
+{
+  uint64_t zret = 0;
+  char const *spot = s;
+
+  if (s && len) {
+    for (char const *limit = spot + len; spot < limit && ParseRules::is_digit(*spot); ++spot) {
+      zret *= 10;
+      zret += *spot - '0';
+    }
+  }
+
+  if (used)
+    *used = spot - s;
+  return zret;
+}

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/ParseRules.h
----------------------------------------------------------------------
diff --git a/lib/ts/ParseRules.h b/lib/ts/ParseRules.h
index ce5c676..db65997 100644
--- a/lib/ts/ParseRules.h
+++ b/lib/ts/ParseRules.h
@@ -842,4 +842,24 @@ ink_atoui(const char *str)
     return static_cast<int>(val);
 }
 
+/** Convert a span of characters to an unsigned 64 bit value.
+
+    Parsing starts at @a s and continues for at most @a len characters.
+    Parsing stops when the first non-digit is encountered. Leading whitespace is not permitted.
+    @a *used is set to the number of characters parsed if @a used is not @c NULL.
+    If @a *used is set to 0 and 0 is returned, then no characters were parsed.
+
+    Key features
+
+    - No termination required.
+    - Number of parsed characters returned.
+    - Unsigned 64 bit return.
+    - Clip to UINT64_MAX.
+
+    @return The binary equivalent of @a s.
+
+    @internal There are all of these conversions and yet none works as I need. Sigh.
+*/
+uint64_t ats_strto64(char const *s, size_t len, size_t *used);
+
 #endif /* #if !defined (_ParseRules_h_) */
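
Since the documentation above leans on the *used out-parameter, here is a self-contained demo of the intended behaviour. The body mirrors the ParseRules.cc addition with a plain digit test standing in for ParseRules::is_digit, and the range string is just an illustrative input; like the version shown in ParseRules.cc, this sketch does not implement the documented clip to UINT64_MAX:

#include <cassert>
#include <cstddef>
#include <cstdint>

static uint64_t strto64_demo(char const *s, size_t len, size_t *used) {
  uint64_t zret = 0;
  char const *spot = s;
  if (s && len) {
    for (char const *limit = spot + len; spot < limit && '0' <= *spot && *spot <= '9'; ++spot) {
      zret *= 10;
      zret += static_cast<uint64_t>(*spot - '0');
    }
  }
  if (used)
    *used = static_cast<size_t>(spot - s);
  return zret;
}

int main() {
  char const range[] = "500-999"; // e.g. the tail of a "bytes=500-999" header value
  size_t used = 0;
  uint64_t lo = strto64_demo(range, sizeof(range) - 1, &used);
  assert(lo == 500 && used == 3); // parsing stopped at the '-', three digits consumed
  uint64_t hi = strto64_demo(range + used + 1, sizeof(range) - 1 - (used + 1), &used);
  assert(hi == 999 && used == 3);
  return 0;
}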

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/TsBuffer.h
----------------------------------------------------------------------
diff --git a/lib/ts/TsBuffer.h b/lib/ts/TsBuffer.h
index 4da12ff..4f4b2ee 100644
--- a/lib/ts/TsBuffer.h
+++ b/lib/ts/TsBuffer.h
@@ -219,6 +219,7 @@ struct ConstBuffer {
   self &set(char const *start, ///< First valid character.
             char const *end    ///< First invalid character.
             );
+
   /// Reset to empty.
   self &reset();
 
@@ -238,7 +239,7 @@ struct ConstBuffer {
       This is convenient when tokenizing and @a p points at the token
       separator.
 
-      @note If @a *p is not in the buffer then @a this is not changed
+      @note If @a *p is in the buffer then @a this is not changed
       and an empty buffer is returned. This means the caller can
       simply pass the result of @c find and check for an empty
       buffer returned to detect no more separators.
@@ -263,17 +264,20 @@ struct ConstBuffer {
       @return A buffer containing data up to but not including @a p.
   */
   self splitOn(char c);
+
   /** Get a trailing segment of the buffer.
 
       @return A buffer that contains all data after @a p.
   */
   self after(char const *p) const;
+
   /** Get a trailing segment of the buffer.
 
       @return A buffer that contains all data after the first
       occurrence of @a c.
   */
   self after(char c) const;
+
   /** Remove trailing segment.
 
       Data at @a p and beyond is removed from the buffer.
@@ -282,6 +286,48 @@ struct ConstBuffer {
       @return @a this.
   */
   self &clip(char const *p);
+
+  /** Remove initial instances of @a c.
+
+      @return @c true if not all characters were skipped, @c false if all characters matched @a c.
+      @see trim
+  */
+  bool skip(char c);
+
+  /** Remove leading characters that satisfy a @a predicate.
+      @return @c true if not all characters were skipped, @c false if all characters matched the @a predicate.
+
+      @internal We template this because the @c ParseRules predicates (which are the usual suspects)
+      return an integral type that is not @c bool.
+  */
+  template <typename BOOL_EQUIV ///< Type that can be automatically converted to bool
+            >
+  bool skip(BOOL_EQUIV (*predicate)(char));
+
+  /** Remove an initial instance the string @a str.
+
+      If the initial characters of the buffer match @a str (ignoring case) then the buffer is advanced past @a str.
+
+      @return @c true if matched and skipped, @c false otherwise.
+  */
+  bool skipNoCase(self const &str);
+
+  /** Remove trailing instances of @a c.
+
+      @return @c true if not all characters were trimmed, @c false if all characters matched @a c.
+      @see @c skip
+  */
+  bool trim(char c);
+
+  /** Remove trailing characters that satisfy a @a predicate.
+      @return @c true if not all characters were trimmed, @c false if all characters matched the @a predicate.
+
+      @internal We template this because the @c ParseRules predicates (which are the usual suspects)
+      return an integral type that is not @c bool.
+  */
+  template <typename BOOL_EQUIV ///< Type that can be automatically converted to bool
+            >
+  bool trim(BOOL_EQUIV (*predicate)(char));
 };
 
 // ----------------------------------------------------------
@@ -500,6 +546,55 @@ ConstBuffer::clip(char const *p)
   return *this;
 }
 
+template <typename BOOL_EQUIV>
+inline bool
+ConstBuffer::skip(BOOL_EQUIV (*predicate)(char))
+{
+  while (*this && predicate(**this))
+    ++*this;
+  return *this;
+}
+inline bool
+ConstBuffer::skip(char c)
+{
+  while (*this && c == **this)
+    ++*this;
+  return *this;
+}
+
+template <typename BOOL_EQUIV>
+inline bool
+ConstBuffer::trim(BOOL_EQUIV (*predicate)(char))
+{
+  if (NULL != _ptr) {
+    while (_size && predicate(_ptr[_size - 1]))
+      --_size;
+  }
+  return *this;
+}
+
+inline bool
+ConstBuffer::skipNoCase(self const &str)
+{
+  bool zret = true;
+  if (str._size <= _size && 0 == strncasecmp(_ptr, str._ptr, str._size))
+    *this += str._size;
+  else
+    zret = false;
+  return zret;
+}
+
+inline bool
+ConstBuffer::trim(char c)
+{
+  if (NULL != _ptr) {
+    while (_size && c == _ptr[_size - 1])
+      --_size;
+  }
+
+  return *this;
+}
+
 } // end namespace
 
 typedef ts::Buffer TsBuffer;
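
A short usage sketch for the new skip/trim/skipNoCase helpers, of the kind the range parsing needs. It assumes the in-tree ts::ConstBuffer (TsBuffer.h with the additions above) and the ParseRules predicates are on the include path; the function name and the exact header handling here are illustrative only:

#include "TsBuffer.h"   // ts::ConstBuffer, assuming lib/ts is on the include path
#include "ParseRules.h" // ParseRules::is_ws

// Check that a header value looks like "bytes = <something>", tolerating
// surrounding whitespace. Returns true if anything follows the '='.
bool looks_like_bytes_range(ts::ConstBuffer value)
{
  static const ts::ConstBuffer UNIT("bytes", 5);

  value.trim(&ParseRules::is_ws); // drop trailing whitespace
  value.skip(&ParseRules::is_ws); // drop leading whitespace
  if (!value.skipNoCase(UNIT))    // must start with the "bytes" unit (case-insensitive)
    return false;
  value.skip(&ParseRules::is_ws);
  if (!value || '=' != *value)    // require the '='
    return false;
  ++value;                        // step over it
  return value.skip(&ParseRules::is_ws); // true iff something follows the '='
}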

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/lib/ts/ink_code.cc
----------------------------------------------------------------------
diff --git a/lib/ts/ink_code.cc b/lib/ts/ink_code.cc
index e099b9c..f5db817 100644
--- a/lib/ts/ink_code.cc
+++ b/lib/ts/ink_code.cc
@@ -154,3 +154,32 @@ ink_code_to_hex_str(char *dest33, uint8_t const *hash)
   *d = '\0';
   return (dest33);
 }
+
+namespace ats
+{
+uint8_t const CRYPTO_HASH_NEXT_TABLE[256] = {
+  21,  53,  167, 51,  255, 126, 241, 151, 115, 66,  155, 174, 226, 215, 80,  188, 12,  95,  8,   24,  162, 201, 46,  104, 79,  172,
+  39,  68,  56,  144, 142, 217, 101, 62,  14,  108, 120, 90,  61,  47,  132, 199, 110, 166, 83,  125, 57,  65,  19,  130, 148, 116,
+  228, 189, 170, 1,   71,  0,   252, 184, 168, 177, 88,  229, 242, 237, 183, 55,  13,  212, 240, 81,  211, 74,  195, 205, 147, 93,
+  30,  87,  86,  63,  135, 102, 233, 106, 118, 163, 107, 10,  243, 136, 160, 119, 43,  161, 206, 141, 203, 78,  175, 36,  37,  140,
+  224, 197, 185, 196, 248, 84,  122, 73,  152, 157, 18,  225, 219, 145, 45,  2,   171, 249, 173, 32,  143, 137, 69,  41,  35,  89,
+  33,  98,  179, 214, 114, 231, 251, 123, 180, 194, 29,  3,   178, 31,  192, 164, 15,  234, 26,  230, 91,  156, 5,   16,  23,  244,
+  58,  50,  4,   67,  134, 165, 60,  235, 250, 7,   138, 216, 49,  139, 191, 154, 11,  52,  239, 59,  111, 245, 9,   64,  25,  129,
+  247, 232, 190, 246, 109, 22,  112, 210, 221, 181, 92,  169, 48,  100, 193, 77,  103, 133, 70,  220, 207, 223, 176, 204, 76,  186,
+  200, 208, 158, 182, 227, 222, 131, 38,  187, 238, 6,   34,  253, 128, 146, 44,  94,  127, 105, 153, 113, 20,  27,  124, 159, 17,
+  72,  218, 96,  149, 213, 42,  28,  254, 202, 40,  117, 82,  97,  209, 54,  236, 121, 75,  85,  150, 99,  198,
+};
+
+uint8_t const CRYPTO_HASH_PREV_TABLE[256] = {
+  57,  55,  119, 141, 158, 152, 218, 165, 18,  178, 89,  172, 16,  68,  34,  146, 153, 233, 114, 48,  229, 0,   187, 154, 19,  180,
+  148, 230, 240, 140, 78,  143, 123, 130, 219, 128, 101, 102, 215, 26,  243, 127, 239, 94,  223, 118, 22,  39,  194, 168, 157, 3,
+  173, 1,   248, 67,  28,  46,  156, 175, 162, 38,  33,  81,  179, 47,  9,   159, 27,  126, 200, 56,  234, 111, 73,  251, 206, 197,
+  99,  24,  14,  71,  245, 44,  109, 252, 80,  79,  62,  129, 37,  150, 192, 77,  224, 17,  236, 246, 131, 254, 195, 32,  83,  198,
+  23,  226, 85,  88,  35,  186, 42,  176, 188, 228, 134, 8,   51,  244, 86,  93,  36,  250, 110, 137, 231, 45,  5,   225, 221, 181,
+  49,  214, 40,  199, 160, 82,  91,  125, 166, 169, 103, 97,  30,  124, 29,  117, 222, 76,  50,  237, 253, 7,   112, 227, 171, 10,
+  151, 113, 210, 232, 92,  95,  20,  87,  145, 161, 43,  2,   60,  193, 54,  120, 25,  122, 11,  100, 204, 61,  142, 132, 138, 191,
+  211, 66,  59,  106, 207, 216, 15,  53,  184, 170, 144, 196, 139, 74,  107, 105, 255, 41,  208, 21,  242, 98,  205, 75,  96,  202,
+  209, 247, 189, 72,  69,  238, 133, 13,  167, 31,  235, 116, 201, 190, 213, 203, 104, 115, 12,  212, 52,  63,  149, 135, 183, 84,
+  147, 163, 249, 65,  217, 174, 70,  6,   64,  90,  155, 177, 185, 182, 108, 121, 164, 136, 58,  220, 241, 4,
+};
+}

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/Alarms.cc
----------------------------------------------------------------------
diff --git a/mgmt/Alarms.cc b/mgmt/Alarms.cc
index 2b260b9..e087aa4 100644
--- a/mgmt/Alarms.cc
+++ b/mgmt/Alarms.cc
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #include "libts.h"
 #include "LocalManager.h"
 #include "ClusterCom.h"
@@ -67,7 +66,6 @@ Alarms::Alarms()
   alarmOEMcount = minOEMkey;
 } /* End Alarms::Alarms */
 
-
 Alarms::~Alarms()
 {
   ink_hash_table_destroy(cblist);
@@ -76,7 +74,6 @@ Alarms::~Alarms()
   ink_mutex_destroy(&mutex);
 } /* End Alarms::Alarms */
 
-
 void
 Alarms::registerCallback(AlarmCallbackFunc func)
 {
@@ -89,7 +86,6 @@ Alarms::registerCallback(AlarmCallbackFunc func)
   ink_mutex_release(&mutex);
 } /* End Alarms::registerCallback */
 
-
 bool
 Alarms::isCurrentAlarm(alarm_t a, char *ip)
 {
@@ -113,7 +109,6 @@ Alarms::isCurrentAlarm(alarm_t a, char *ip)
   return ret;
 } /* End Alarms::isCurrentAlarm */
 
-
 void
 Alarms::resolveAlarm(alarm_t a, char *ip)
 {
@@ -147,7 +142,6 @@ Alarms::resolveAlarm(alarm_t a, char *ip)
   return;
 } /* End Alarms::resolveAlarm */
 
-
 void
 Alarms::signalAlarm(alarm_t a, const char *desc, const char *ip)
 {
@@ -294,14 +288,14 @@ Alarms::signalAlarm(alarm_t a, const char *desc, const char *ip)
     (*(func))(a, ip, desc);
   }
 
-  /* Priority 2 alarms get signalled if they are the first unsolved occurence. */
+  /* Priority 2 alarms get signalled if they are the first unsolved occurence.
+   */
   if (priority == 2 && !ip) {
     execAlarmBin(desc);
   }
 
 } /* End Alarms::signalAlarm */
 
-
 /*
  * resetSeenFlag(...)
  *   Function resets the "seen" flag for a given peer's alarms. This allows
@@ -327,7 +321,6 @@ Alarms::resetSeenFlag(char *ip)
   return;
 } /* End Alarms::resetSeenFlag */
 
-
 /*
  * clearUnSeen(...)
 *   This function is a sweeper function to clean up those alarms that have
@@ -357,7 +350,6 @@ Alarms::clearUnSeen(char *ip)
   return;
 } /* End Alarms::clearUnSeen */
 
-
 /*
  * constructAlarmMessage(...)
  *   This functions builds a message buffer for passing to peers. It basically
@@ -418,7 +410,6 @@ Alarms::constructAlarmMessage(const AppVersionInfo &version, char *ip, char *mes
   return;
 } /* End Alarms::constructAlarmMessage */
 
-
 /*
  * checkSystemNAlert(...)
  *   This function should test the system and signal local alarms. Sending

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/Alarms.h
----------------------------------------------------------------------
diff --git a/mgmt/Alarms.h b/mgmt/Alarms.h
index b699d4c..be3f641 100644
--- a/mgmt/Alarms.h
+++ b/mgmt/Alarms.h
@@ -81,7 +81,6 @@ class AppVersionInfo;
 extern const char *alarmText[];
 extern const int alarmTextNum;
 
-
 /* OEM_ALARM: the alarm type is used as a key for hash tables;
    need offset and modulo constants which will keep the unique
    keys for OEM alarms within a specified range */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/BaseManager.cc
----------------------------------------------------------------------
diff --git a/mgmt/BaseManager.cc b/mgmt/BaseManager.cc
index c72c4a9..f0a275b 100644
--- a/mgmt/BaseManager.cc
+++ b/mgmt/BaseManager.cc
@@ -34,7 +34,6 @@
 #include "libts.h"
 #include "BaseManager.h"
 
-
 BaseManager::BaseManager()
 {
   /* Setup the event queue and callback tables */
@@ -43,7 +42,6 @@ BaseManager::BaseManager()
 
 } /* End BaseManager::BaseManager */
 
-
 BaseManager::~BaseManager()
 {
   InkHashTableEntry *entry;
@@ -69,7 +67,6 @@ BaseManager::~BaseManager()
   return;
 } /* End BaseManager::~BaseManager */
 
-
 /*
  * registerMgmtCallback(...)
  *   Function to register callback's for various management events, such
@@ -113,7 +110,6 @@ BaseManager::registerMgmtCallback(int msg_id, MgmtCallback cb, void *opaque_cb_d
   return msg_id;
 } /* End BaseManager::registerMgmtCallback */
 
-
 /*
  * signalMgmtEntity(...)
  */
@@ -151,7 +147,6 @@ BaseManager::signalMgmtEntity(int msg_id, char *data_raw, int data_len)
 
 } /* End BaseManager::signalMgmtEntity */
 
-
 void
 BaseManager::executeMgmtCallback(int msg_id, char *data_raw, int data_len)
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/BaseManager.h
----------------------------------------------------------------------
diff --git a/mgmt/BaseManager.h b/mgmt/BaseManager.h
index 2075d54..ba81ec9 100644
--- a/mgmt/BaseManager.h
+++ b/mgmt/BaseManager.h
@@ -40,7 +40,6 @@
 
 #include "MgmtDefs.h"
 
-
 /*******************************************
  * used by LocalManager and in Proxy Main. *
  */
@@ -106,14 +105,12 @@ typedef struct _mgmt_message_hdr_type {
   int data_len;
 } MgmtMessageHdr;
 
-
 typedef struct _mgmt_event_callback_list {
   MgmtCallback func;
   void *opaque_data;
   struct _mgmt_event_callback_list *next;
 } MgmtCallbackList;
 
-
 class BaseManager
 {
 public:
@@ -134,5 +131,4 @@ protected:
 private:
 }; /* End class BaseManager */
 
-
 #endif /* _BASE_MANAGER_H */
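
BaseManager.h keeps the registered handlers for each message id in the singly linked MgmtCallbackList shown above (func, opaque_data, next). A small sketch of appending to such a list, roughly what registerMgmtCallback(msg_id, cb, opaque_cb_data) does for one id; the MgmtCallback signature below is a hypothetical placeholder, since the real typedef is in MgmtDefs.h and not shown in this diff.

  #include <cstdlib>

  // Hypothetical signature for illustration only.
  typedef void (*MgmtCallback)(void *opaque, char *data, int len);

  struct MgmtCallbackList {
    MgmtCallback func;
    void *opaque_data;
    MgmtCallbackList *next;
  };

  // Append a callback node at the tail, preserving registration order.
  static void
  append_callback(MgmtCallbackList **head, MgmtCallback cb, void *opaque)
  {
    MgmtCallbackList *node = (MgmtCallbackList *)malloc(sizeof(MgmtCallbackList));
    node->func = cb;
    node->opaque_data = opaque;
    node->next = NULL;

    if (*head == NULL) {
      *head = node;
      return;
    }
    MgmtCallbackList *cur = *head;
    while (cur->next != NULL)
      cur = cur->next;
    cur->next = node;
  }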

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/FileManager.cc
----------------------------------------------------------------------
diff --git a/mgmt/FileManager.cc b/mgmt/FileManager.cc
index 55e1dd8..cd5dee0 100644
--- a/mgmt/FileManager.cc
+++ b/mgmt/FileManager.cc
@@ -55,8 +55,9 @@ FileManager::FileManager()
   // Check to see if the directory already exists, if not create it.
   if (mkdir(snapshotDir, DIR_MODE) < 0 && errno != EEXIST) {
     // Failed to create the snapshot directory
-    mgmt_fatal(stderr, 0, "[FileManager::FileManager] Failed to create the snapshot directory %s: %s\n", (const char *)snapshotDir,
-               strerror(errno));
+    mgmt_fatal(stderr, 0, "[FileManager::FileManager] Failed to create the "
+                          "snapshot directory %s: %s\n",
+               (const char *)snapshotDir, strerror(errno));
   }
 
   if (!ink_file_is_directory(snapshotDir)) {
@@ -124,7 +125,8 @@ FileManager::registerCallback(FileCallbackFunc func)
   ink_mutex_release(&cbListLock);
 }
 
-// void FileManager::addFile(char* baseFileName, const configFileInfo* file_info)
+// void FileManager::addFile(char* baseFileName, const configFileInfo*
+// file_info)
 //
 //  for the baseFile, creates a Rollback object for it
 //
@@ -291,7 +293,9 @@ FileManager::abortRestore(const char *abortTo)
 
     currentVersion = rb->getCurrentVersion();
     if (rb->revertToVersion_ml(currentVersion - 1) != OK_ROLLBACK) {
-      mgmt_fatal(stderr, 0, "[FileManager::abortRestore] Unable to abort a failed snapshot restore.  Configuration files have been "
+      mgmt_fatal(stderr, 0, "[FileManager::abortRestore] Unable to abort a "
+                            "failed snapshot restore.  Configuration files "
+                            "have been "
                             "left in a inconsistent state\n");
     }
   }
@@ -415,7 +419,9 @@ FileManager::removeSnap(const char *snapName, const char *snapDir)
   if (unlinkFailed == false) {
     if (rmdir(snapPath) < 0) {
       // strerror() isn't reentrant/thread-safe ... Problem? /leif
-      mgmt_log(stderr, "[FileManager::removeSnap] Unable to remove snapshot directory %s: %s\n", snapPath, strerror(errno));
+      mgmt_log(stderr, "[FileManager::removeSnap] Unable to remove snapshot "
+                       "directory %s: %s\n",
+               snapPath, strerror(errno));
       result = SNAP_REMOVE_FAILED;
     } else {
       result = SNAP_OK;
@@ -457,7 +463,9 @@ FileManager::takeSnap(const char *snapName, const char *snapDir)
   snapPath = newPathString(snapDir, snapName);
 
   if (mkdir(snapPath, DIR_MODE) < 0 && errno != EEXIST) {
-    mgmt_log(stderr, "[FileManager::takeSnap] Failed to create directory for snapshot %s: %s\n", snapName, strerror(errno));
+    mgmt_log(stderr, "[FileManager::takeSnap] Failed to create directory for "
+                     "snapshot %s: %s\n",
+             snapName, strerror(errno));
     delete[] snapPath;
     return SNAP_DIR_CREATE_FAILED;
   }
@@ -484,7 +492,9 @@ FileManager::takeSnap(const char *snapName, const char *snapDir)
       // Remove the failed snapshot so that we do not have a partial
       //   one hanging around
       if (removeSnap(snapName, snapDir) != SNAP_OK) {
-        mgmt_log(stderr, "[FileManager::takeSnap] Unable to remove failed snapshot %s.  This snapshot should be removed by hand\n",
+        mgmt_log(stderr, "[FileManager::takeSnap] Unable to remove failed "
+                         "snapshot %s.  This snapshot should be removed by "
+                         "hand\n",
                  snapName);
       }
       break;
@@ -596,8 +606,10 @@ FileManager::WalkSnaps(ExpandingArray *snapList)
 {
   MFresult r;
 
-  // The original code reset this->managedDir from proxy.config.snapshot_dir at this point. There doesn't appear to be
-  // any need for that, since managedDir is always set in the constructor and should not be changed.
+  // The original code reset this->managedDir from proxy.config.snapshot_dir at
+  // this point. There doesn't appear to be
+  // any need for that, since managedDir is always set in the constructor and
+  // should not be changed.
   ink_release_assert(this->managedDir != NULL);
 
   ink_mutex_acquire(&accessLock);
@@ -663,7 +675,8 @@ FileManager::isConfigStale()
   return stale;
 }
 
-// void FileManager::displaySnapPage(textBuffer* output, httpResponse& answerHdr)
+// void FileManager::displaySnapPage(textBuffer* output, httpResponse&
+// answerHdr)
 //
 //  Generates an HTML page with the add form and the list
 //    of current snapshots
@@ -684,7 +697,8 @@ FileManager::displaySnapOption(textBuffer *output)
   }
 }
 
-// void FileManager::createSelect(char* formVar, textBuffer* output, ExpandingArray*)
+// void FileManager::createSelect(char* formVar, textBuffer* output,
+// ExpandingArray*)
 //
 //  Creates a form with a select list.  The select options come
 //    from the expanding array.  Action is the value for the hidden input
@@ -693,7 +707,9 @@ FileManager::displaySnapOption(textBuffer *output)
 void
 FileManager::createSelect(char *action, textBuffer *output, ExpandingArray *options)
 {
-  const char formOpen[] = "<form method=POST action=\"/configure/snap_action.html\">\n<select name=snap>\n";
+  const char formOpen[] = "<form method=POST "
+                          "action=\"/configure/snap_action.html\">\n<select "
+                          "name=snap>\n";
   const char formEnd[] = "</form>";
   const char submitButton[] = "<input type=submit value=\"";
   const char hiddenInput[] = "<input type=hidden name=action value=";
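
FileManager::takeSnap() above creates the snapshot directory with mkdir(), copies the managed files into it, and on a copy failure calls removeSnap() so no partial snapshot is left behind. A standalone sketch of that create/populate/clean-up-on-failure shape, written against plain POSIX calls rather than the FileManager API:

  #include <sys/stat.h>
  #include <unistd.h>
  #include <cerrno>
  #include <cstdio>
  #include <cstring>

  // Returns true on success; on a copy failure the partly built directory is
  // removed, mirroring the takeSnap()/removeSnap() pairing in the diff above.
  static bool
  take_snapshot(const char *snap_dir, bool (*copy_files_into)(const char *dir))
  {
    if (mkdir(snap_dir, 0755) < 0 && errno != EEXIST) {
      fprintf(stderr, "failed to create %s: %s\n", snap_dir, strerror(errno));
      return false;
    }
    if (!copy_files_into(snap_dir)) {
      if (rmdir(snap_dir) < 0) // partial snapshot: try to clean it up
        fprintf(stderr, "partial snapshot %s should be removed by hand\n", snap_dir);
      return false;
    }
    return true;
  }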

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/LocalManager.cc
----------------------------------------------------------------------
diff --git a/mgmt/LocalManager.cc b/mgmt/LocalManager.cc
index 4d8fddf..911d0d3 100644
--- a/mgmt/LocalManager.cc
+++ b/mgmt/LocalManager.cc
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #include "libts.h"
 #include "ink_platform.h"
 #include "MgmtUtils.h"
@@ -51,7 +50,6 @@ LocalManager::mgmtCleanup()
   return;
 }
 
-
 void
 LocalManager::mgmtShutdown()
 {
@@ -71,7 +69,6 @@ LocalManager::mgmtShutdown()
   mgmtCleanup();
 }
 
-
 void
 LocalManager::processShutdown(bool mainThread)
 {
@@ -84,7 +81,6 @@ LocalManager::processShutdown(bool mainThread)
   return;
 }
 
-
 void
 LocalManager::processRestart()
 {
@@ -93,7 +89,6 @@ LocalManager::processRestart()
   return;
 }
 
-
 void
 LocalManager::processBounce()
 {
@@ -211,9 +206,11 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on),
   RecHttpLoadIp("proxy.local.incoming_ip_to_bind", m_inbound_ip4, m_inbound_ip6);
 
   if (access(sysconfdir, R_OK) == -1) {
-    mgmt_elog(0, "[LocalManager::LocalManager] unable to access() directory '%s': %d, %s\n", (const char *)sysconfdir, errno,
-              strerror(errno));
-    mgmt_fatal(0, "[LocalManager::LocalManager] please set the 'TS_ROOT' environment variable\n");
+    mgmt_elog(0, "[LocalManager::LocalManager] unable to access() directory "
+                 "'%s': %d, %s\n",
+              (const char *)sysconfdir, errno, strerror(errno));
+    mgmt_fatal(0, "[LocalManager::LocalManager] please set the 'TS_ROOT' "
+                  "environment variable\n");
   }
 
 #if TS_HAS_WCCP
@@ -234,7 +231,8 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on),
     if (located) {
       wccp_cache.loadServicesFromFile(wccp_config_str);
     } else { // not located
-      mgmt_log("[LocalManager::LocalManager] WCCP service configuration file '%s' was specified but could not be found in the file "
+      mgmt_log("[LocalManager::LocalManager] WCCP service configuration file "
+               "'%s' was specified but could not be found in the file "
                "system.\n",
                static_cast<char *>(wccp_config_str));
     }
@@ -254,7 +252,8 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on),
   // coverity[fs_check_call]
   if (access(absolute_proxy_binary, R_OK | X_OK) == -1) {
     mgmt_elog(0, "[LocalManager::LocalManager] Unable to access() '%s': %d, %s\n", absolute_proxy_binary, errno, strerror(errno));
-    mgmt_fatal(0, "[LocalManager::LocalManager] please set bin path 'proxy.config.bin_path' \n");
+    mgmt_fatal(0, "[LocalManager::LocalManager] please set bin path "
+                  "'proxy.config.bin_path' \n");
   }
 
   internal_ticker = 0;
@@ -311,9 +310,13 @@ LocalManager::initCCom(const AppVersionInfo &version, FileManager *configFiles,
 
   found = mgmt_getAddrForIntr(intrName, &cluster_ip.sa);
   if (found == false) {
-    mgmt_fatal(stderr, 0, "[LocalManager::initCCom] Unable to find network interface %s.  Exiting...\n", intrName);
+    mgmt_fatal(stderr, 0, "[LocalManager::initCCom] Unable to find network "
+                          "interface %s.  Exiting...\n",
+               intrName);
   } else if (!ats_is_ip4(&cluster_ip)) {
-    mgmt_fatal(stderr, 0, "[LocalManager::initCCom] Unable to find IPv4 network interface %s.  Exiting...\n", intrName);
+    mgmt_fatal(stderr, 0, "[LocalManager::initCCom] Unable to find IPv4 "
+                          "network interface %s.  Exiting...\n",
+               intrName);
   }
 
   ats_ip_ntop(&cluster_ip, clusterAddrStr, sizeof(clusterAddrStr));
@@ -434,7 +437,9 @@ LocalManager::pollMgmtProcessServer()
         MgmtMessageHdr *mh;
         int data_len;
 
-        mgmt_log(stderr, "[LocalManager::pollMgmtProcessServer] New process connecting fd '%d'\n", new_sockfd);
+        mgmt_log(stderr, "[LocalManager::pollMgmtProcessServer] New process "
+                         "connecting fd '%d'\n",
+                 new_sockfd);
 
         if (new_sockfd < 0) {
           mgmt_elog(stderr, errno, "[LocalManager::pollMgmtProcessServer] ==> ");
@@ -446,7 +451,8 @@ LocalManager::pollMgmtProcessServer()
           mh->data_len = data_len;
           memcpy((char *)mh + sizeof(MgmtMessageHdr), &mgmt_sync_key, data_len);
           if (mgmt_write_pipe(new_sockfd, (char *)mh, sizeof(MgmtMessageHdr) + data_len) <= 0) {
-            mgmt_elog(errno, "[LocalManager::pollMgmtProcessServer] Error writing sync key message!\n");
+            mgmt_elog(errno, "[LocalManager::pollMgmtProcessServer] Error "
+                             "writing sync key message!\n");
             close_socket(new_sockfd);
             watched_process_fd = watched_process_pid = -1;
           }
@@ -470,10 +476,14 @@ LocalManager::pollMgmtProcessServer()
           if ((res = mgmt_read_pipe(watched_process_fd, data_raw, mh_hdr.data_len)) > 0) {
             handleMgmtMsgFromProcesses(mh_full);
           } else if (res < 0) {
-            mgmt_fatal(0, "[LocalManager::pollMgmtProcessServer] Error in read (errno: %d)\n", -res);
+            mgmt_fatal(0, "[LocalManager::pollMgmtProcessServer] Error in read "
+                          "(errno: %d)\n",
+                       -res);
           }
         } else if (res < 0) {
-          mgmt_fatal(0, "[LocalManager::pollMgmtProcessServer] Error in read (errno: %d)\n", -res);
+          mgmt_fatal(0, "[LocalManager::pollMgmtProcessServer] Error in read "
+                        "(errno: %d)\n",
+                     -res);
         }
         // handle EOF
         if (res == 0) {
@@ -512,12 +522,13 @@ LocalManager::pollMgmtProcessServer()
       ink_assert(num == 0); /* Invariant */
 
     } else if (num < 0) { /* Error */
-      mgmt_elog(stderr, 0, "[LocalManager::pollMgmtProcessServer] select failed or was interrupted (%d)\n", errno);
+      mgmt_elog(stderr, 0, "[LocalManager::pollMgmtProcessServer] select "
+                           "failed or was interrupted (%d)\n",
+                errno);
     }
   }
 }
 
-
 void
 LocalManager::handleMgmtMsgFromProcesses(MgmtMessageHdr *mh)
 {
@@ -629,7 +640,6 @@ LocalManager::handleMgmtMsgFromProcesses(MgmtMessageHdr *mh)
   }
 }
 
-
 void
 LocalManager::sendMgmtMsgToProcesses(int msg_id, const char *data_str)
 {
@@ -637,7 +647,6 @@ LocalManager::sendMgmtMsgToProcesses(int msg_id, const char *data_str)
   return;
 }
 
-
 void
 LocalManager::sendMgmtMsgToProcesses(int msg_id, const char *data_raw, int data_len)
 {
@@ -651,7 +660,6 @@ LocalManager::sendMgmtMsgToProcesses(int msg_id, const char *data_raw, int data_
   return;
 }
 
-
 void
 LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh)
 {
@@ -672,7 +680,8 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh)
     mh->msg_id = MGMT_EVENT_SHUTDOWN;
     break;
   case MGMT_EVENT_ROLL_LOG_FILES:
-    mgmt_log("[LocalManager::SendMgmtMsgsToProcesses]Event is being constructed .\n");
+    mgmt_log("[LocalManager::SendMgmtMsgsToProcesses]Event is being "
+             "constructed .\n");
     break;
   case MGMT_EVENT_CONFIG_FILE_UPDATE:
   case MGMT_EVENT_CONFIG_FILE_UPDATE_NO_INC_VERSION:
@@ -704,11 +713,15 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh)
 
   if (watched_process_fd != -1) {
     if (mgmt_write_pipe(watched_process_fd, (char *)mh, sizeof(MgmtMessageHdr) + mh->data_len) <= 0) {
-      // In case of Linux, sometimes when the TS dies, the connection between TS and TM
-      // is not closed properly. the socket does not receive an EOF. So, the TM does
-      // not detect that the connection and hence TS has gone down. Hence it still
+      // In case of Linux, sometimes when the TS dies, the connection between TS
+      // and TM
+      // is not closed properly. the socket does not receive an EOF. So, the TM
+      // does
+      // not detect that the connection and hence TS has gone down. Hence it
+      // still
       // tries to send a message to TS, but encounters an error and enters here
-      // Also, ensure that this whole thing is done only once because there will be a
+      // Also, ensure that this whole thing is done only once because there will
+      // be a
       // deluge of message in the traffic.log otherwise
 
       static pid_t check_prev_pid = watched_process_pid;
@@ -744,7 +757,9 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh)
             // End of TS down
           } else {
             // TS is still up, but the connection is lost
-            const char *err_msg = "The TS-TM connection is broken for some reason. Either restart TS and TM or correct this error "
+            const char *err_msg = "The TS-TM connection is broken for some "
+                                  "reason. Either restart TS and TM or correct "
+                                  "this error "
                                   "for TM to display TS statistics correctly";
             lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_PROXY_SYSTEM_ERROR, err_msg);
           }
@@ -761,7 +776,6 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh)
   }
 }
 
-
 void
 LocalManager::signalFileChange(const char *var_name, bool incVersion)
 {
@@ -773,7 +787,6 @@ LocalManager::signalFileChange(const char *var_name, bool incVersion)
   return;
 }
 
-
 void
 LocalManager::signalEvent(int msg_id, const char *data_str)
 {
@@ -781,7 +794,6 @@ LocalManager::signalEvent(int msg_id, const char *data_str)
   return;
 }
 
-
 void
 LocalManager::signalEvent(int msg_id, const char *data_raw, int data_len)
 {
@@ -796,7 +808,6 @@ LocalManager::signalEvent(int msg_id, const char *data_raw, int data_len)
   return;
 }
 
-
 /*
  * processEventQueue()
  *   Function drains and processes the mgmt event queue
@@ -841,7 +852,6 @@ LocalManager::processEventQueue()
   }
 }
 
-
 /*
  * startProxy()
  *   Function fires up a proxy process.
@@ -866,7 +876,9 @@ LocalManager::startProxy()
   // traffic server binary exists, check permissions
   else if (access(absolute_proxy_binary, R_OK | X_OK) < 0) {
     // Error don't have proper permissions
-    mgmt_elog(stderr, errno, "[LocalManager::startProxy] Unable to access %s due to bad permisssions \n", absolute_proxy_binary);
+    mgmt_elog(stderr, errno, "[LocalManager::startProxy] Unable to access %s "
+                             "due to bad permisssions \n",
+              absolute_proxy_binary);
     return false;
   }
 
@@ -1000,7 +1012,8 @@ LocalManager::listenForProxy()
       this->bindProxyPort(p);
     }
 
-    // read backlog configuration value and overwrite the default value if found
+    // read backlog configuration value and overwrite the default value if
+    // found
     int backlog = 1024;
     bool found;
     RecInt config_backlog = REC_readInteger("proxy.config.net.listen_backlog", &found);
@@ -1016,7 +1029,6 @@ LocalManager::listenForProxy()
   return;
 }
 
-
 /*
  * bindProxyPort()
  *  Function binds the accept port of the proxy
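
sendMgmtMsgToProcesses() and pollMgmtProcessServer() above move every message as a fixed MgmtMessageHdr immediately followed by data_len bytes of payload, written in one mgmt_write_pipe() call of sizeof(MgmtMessageHdr) + data_len bytes. A sketch of that framing using a plain write(); the stand-in header carries only the msg_id and data_len fields the diff shows being used.

  #include <unistd.h>
  #include <cstdlib>
  #include <cstring>

  struct MgmtMessageHdrSketch { // stand-in for MgmtMessageHdr
    int msg_id;
    int data_len;
  };

  // Pack header + payload into one contiguous buffer and write it in a single
  // call, the same layout LocalManager uses over the management pipe.
  static ssize_t
  send_mgmt_msg(int fd, int msg_id, const char *data, int data_len)
  {
    size_t total = sizeof(MgmtMessageHdrSketch) + data_len;
    char *buf = (char *)malloc(total);

    MgmtMessageHdrSketch hdr;
    hdr.msg_id = msg_id;
    hdr.data_len = data_len;

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), data, data_len);

    ssize_t written = write(fd, buf, total);
    free(buf);
    return written;
  }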

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/MultiFile.cc
----------------------------------------------------------------------
diff --git a/mgmt/MultiFile.cc b/mgmt/MultiFile.cc
index e00b1e7..d960881 100644
--- a/mgmt/MultiFile.cc
+++ b/mgmt/MultiFile.cc
@@ -147,7 +147,6 @@ MultiFile::WalkFiles(ExpandingArray *fileList)
   return MF_OK;
 }
 
-
 bool
 MultiFile::isManaged(const char *fileName)
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/ProcessManager.cc
----------------------------------------------------------------------
diff --git a/mgmt/ProcessManager.cc b/mgmt/ProcessManager.cc
index 6c9bdf9..d823400 100644
--- a/mgmt/ProcessManager.cc
+++ b/mgmt/ProcessManager.cc
@@ -1,6 +1,7 @@
 /** @file
 
-  File contains the member function defs and thread loop for the process manager.
+  File contains the member function defs and thread loop for the process
+  manager.
 
   @section license License
 
@@ -21,7 +22,6 @@
   limitations under the License.
  */
 
-
 #include "libts.h"
 #undef HTTP_CACHE
 #include "InkAPIInternal.h"
@@ -76,7 +76,6 @@ ProcessManager::ProcessManager(bool rlm) : BaseManager(), require_lm(rlm), mgmt_
   pid = getpid();
 } /* End ProcessManager::ProcessManager */
 
-
 void
 ProcessManager::reconfigure()
 {
@@ -87,7 +86,6 @@ ProcessManager::reconfigure()
   return;
 } /* End ProcessManager::reconfigure */
 
-
 void
 ProcessManager::signalManager(int msg_id, const char *data_str)
 {
@@ -95,7 +93,6 @@ ProcessManager::signalManager(int msg_id, const char *data_str)
   return;
 } /* End ProcessManager::signalManager */
 
-
 void
 ProcessManager::signalManager(int msg_id, const char *data_raw, int data_len)
 {
@@ -110,7 +107,6 @@ ProcessManager::signalManager(int msg_id, const char *data_raw, int data_len)
 
 } /* End ProcessManager::signalManager */
 
-
 bool
 ProcessManager::processEventQueue()
 {
@@ -126,7 +122,8 @@ ProcessManager::processEventQueue()
       executeMgmtCallback(mh->msg_id, NULL, 0);
     }
     if (mh->msg_id == MGMT_EVENT_SHUTDOWN) {
-      mgmt_log(stderr, "[ProcessManager::processEventQueue] Shutdown msg received, exiting\n");
+      mgmt_log(stderr, "[ProcessManager::processEventQueue] Shutdown msg "
+                       "received, exiting\n");
       _exit(0);
     } /* Exit on shutdown */
     ats_free(mh);
@@ -135,7 +132,6 @@ ProcessManager::processEventQueue()
   return ret;
 } /* End ProcessManager::processEventQueue */
 
-
 bool
 ProcessManager::processSignalQueue()
 {
@@ -158,7 +154,6 @@ ProcessManager::processSignalQueue()
   return ret;
 } /* End ProcessManager::processSignalQueue */
 
-
 void
 ProcessManager::initLMConnection()
 {
@@ -192,7 +187,8 @@ ProcessManager::initLMConnection()
   }
 
   if ((connect(local_manager_sockfd, (struct sockaddr *)&serv_addr, servlen)) < 0) {
-    mgmt_fatal(stderr, errno, "[ProcessManager::initLMConnection] failed to connect management socket '%s'\n",
+    mgmt_fatal(stderr, errno, "[ProcessManager::initLMConnection] failed to "
+                              "connect management socket '%s'\n",
                (const char *)sockpath);
   }
 
@@ -218,14 +214,12 @@ ProcessManager::initLMConnection()
     }
   }
 
-
   if (sync_key_raw)
     memcpy(&mgmt_sync_key, sync_key_raw, sizeof(mgmt_sync_key));
   Debug("pmgmt", "[ProcessManager::initLMConnection] Received key: %d\n", mgmt_sync_key);
 
 } /* End ProcessManager::initLMConnection */
 
-
 void
 ProcessManager::pollLMConnection()
 {
@@ -263,7 +257,9 @@ ProcessManager::pollLMConnection()
       }
 
     } else if (num < 0) { /* Error */
-      mgmt_elog(stderr, 0, "[ProcessManager::pollLMConnection] select failed or was interrupted (%d)\n", errno);
+      mgmt_elog(stderr, 0, "[ProcessManager::pollLMConnection] select failed "
+                           "or was interrupted (%d)\n",
+                errno);
     }
   }
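
ProcessManager::initLMConnection() above fills in a sockaddr_un for the management socket path and connect()s to the local manager, treating a failed connect as fatal. A standalone sketch of that connection step; errors here simply return -1 instead of calling mgmt_fatal().

  #include <sys/socket.h>
  #include <sys/un.h>
  #include <unistd.h>
  #include <cstring>

  // Connect to a UNIX-domain management socket; returns the fd, or -1 on error.
  static int
  connect_mgmt_socket(const char *sockpath)
  {
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0)
      return -1;

    struct sockaddr_un serv_addr;
    memset(&serv_addr, 0, sizeof(serv_addr));
    serv_addr.sun_family = AF_UNIX;
    strncpy(serv_addr.sun_path, sockpath, sizeof(serv_addr.sun_path) - 1);

    if (connect(fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0) {
      close(fd);
      return -1;
    }
    return fd;
  }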
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/ProxyConfig.h
----------------------------------------------------------------------
diff --git a/mgmt/ProxyConfig.h b/mgmt/ProxyConfig.h
index 8699296..0103b66 100644
--- a/mgmt/ProxyConfig.h
+++ b/mgmt/ProxyConfig.h
@@ -65,7 +65,8 @@ public:
   ConfigProcessor();
 
   enum {
-    // The number of seconds to wait before garbage collecting stale ConfigInfo objects. There's
+    // The number of seconds to wait before garbage collecting stale ConfigInfo
+    // objects. There's
     // no good reason to tune this, outside of regression tests, so don't.
     CONFIG_PROCESSOR_RELEASE_SECS = 60
   };
@@ -91,7 +92,8 @@ public:
   int ninfos;
 };
 
-// A Continuation wrapper that calls the static reconfigure() method of the given class.
+// A Continuation wrapper that calls the static reconfigure() method of the
+// given class.
 template <typename UpdateClass> struct ConfigUpdateContinuation : public Continuation {
   int
   update(int /* etype */, void * /* data */)
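
ConfigUpdateContinuation<UpdateClass> above is a Continuation whose update() simply forwards to the static reconfigure() method of the class it was instantiated with. A stripped-down analogue without the Continuation machinery; the HttpConfigLike name is hypothetical and used only for this example.

  #include <cstdio>

  // Only requirement on UpdateClass: a static reconfigure(), as in the template above.
  template <typename UpdateClass> struct ConfigUpdateSketch {
    int
    update()
    {
      UpdateClass::reconfigure(); // same forwarding the real continuation performs
      return 0;
    }
  };

  struct HttpConfigLike { // hypothetical config class
    static void
    reconfigure()
    {
      printf("reloading configuration\n");
    }
  };

  int
  main()
  {
    ConfigUpdateSketch<HttpConfigLike> c;
    return c.update();
  }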

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/RecordsConfig.h
----------------------------------------------------------------------
diff --git a/mgmt/RecordsConfig.h b/mgmt/RecordsConfig.h
index 8d73023..a332b1c 100644
--- a/mgmt/RecordsConfig.h
+++ b/mgmt/RecordsConfig.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #if !defined(_RECORDS_CONFIG_H_)
 #define _RECORDS_CONFIG_H_
 
@@ -50,7 +49,8 @@ typedef void (*RecordElementCallback)(const RecordElement *, void *);
 void RecordsConfigIterate(RecordElementCallback, void *);
 
 void LibRecordsConfigInit();                 // initializes RecordsConfigIndex
-void RecordsConfigOverrideFromEnvironment(); // Override records from the environment
+void RecordsConfigOverrideFromEnvironment(); // Override records from the
+                                             // environment
 void test_librecords();
 
 #endif

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/RecordsConfigUtils.cc
----------------------------------------------------------------------
diff --git a/mgmt/RecordsConfigUtils.cc b/mgmt/RecordsConfigUtils.cc
index 7c16503..8cfdaa9 100644
--- a/mgmt/RecordsConfigUtils.cc
+++ b/mgmt/RecordsConfigUtils.cc
@@ -38,9 +38,12 @@ override_record(const RecordElement *record, void *)
 
     if ((value = RecConfigOverrideFromEnvironment(record->name, NULL))) {
       if (RecDataSetFromString(record->value_type, &data, value)) {
-        // WARNING: If we are not the record owner, RecSetRecord() doesn't set our copy
-        // of the record. It sends a set message to the local manager. This can cause
-        // "interesting" results if you are trying to override configuration values
+        // WARNING: If we are not the record owner, RecSetRecord() doesn't set
+        // our copy
+        // of the record. It sends a set message to the local manager. This can
+        // cause
+        // "interesting" results if you are trying to override configuration
+        // values
         // early in startup (before we have synced with the local manager).
         RecSetRecord(record->type, record->name, record->value_type, &data, NULL, REC_SOURCE_ENV, false);
         RecDataClear(record->value_type, &data);
@@ -49,8 +52,10 @@ override_record(const RecordElement *record, void *)
   }
 }
 
-// We process environment variable overrides when we parse the records.config configuration file, but the
-// operator might choose to override a variable that is not present in records.config so we have to post-
+// We process environment variable overrides when we parse the records.config
+// configuration file, but the
+// operator might choose to override a variable that is not present in
+// records.config so we have to post-
 // process the full set of configuration variables as well.
 void
 RecordsConfigOverrideFromEnvironment()
@@ -85,7 +90,8 @@ initialize_record(const RecordElement *record, void *)
     RecData data = {0};
     RecSourceT source = value == record->value ? REC_SOURCE_DEFAULT : REC_SOURCE_ENV;
 
-    // If you specify a consistency check, you have to specify a regex expression. We abort here
+    // If you specify a consistency check, you have to specify a regex
+    // expression. We abort here
     // so that this breaks QA completely.
     if (record->check != RECC_NULL && record->regex == NULL) {
       ink_fatal("%s has a consistency check but no regular expression", record->name);
@@ -116,7 +122,8 @@ initialize_record(const RecordElement *record, void *)
     } // switch
 
     RecDataClear(record->value_type, &data);
-  } else { // Everything else, except PROCESS, are stats. TODO: Should modularize this too like PROCESS was done.
+  } else { // Everything else, except PROCESS, are stats. TODO: Should
+           // modularize this too like PROCESS was done.
     ink_assert(REC_TYPE_IS_STAT(type));
 
     switch (record->value_type) {
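
override_record() above asks RecConfigOverrideFromEnvironment() for an environment value and, when one is present, parses it with RecDataSetFromString() and applies it with RecSetRecord(). A small analogue of that lookup-parse-apply flow built on getenv(); the PROXY_CONFIG_... upper-case naming used here is an assumption about how record names map to environment variables, not something shown in this diff.

  #include <cctype>
  #include <cstdlib>

  // Map "proxy.config.net.listen_backlog" -> "PROXY_CONFIG_NET_LISTEN_BACKLOG"
  // (assumed convention) and override the default when the variable is set.
  static int
  int_record_with_env_override(const char *record_name, int default_value)
  {
    char env_name[256];
    size_t i = 0;
    for (; record_name[i] != '\0' && i < sizeof(env_name) - 1; ++i)
      env_name[i] = (record_name[i] == '.') ? '_' : toupper((unsigned char)record_name[i]);
    env_name[i] = '\0';

    const char *value = getenv(env_name);
    if (value == NULL)
      return default_value; // nothing to override

    return atoi(value); // the real code goes through RecDataSetFromString()/RecSetRecord()
  }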

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/Rollback.cc
----------------------------------------------------------------------
diff --git a/mgmt/Rollback.cc b/mgmt/Rollback.cc
index a218a7d..4969be9 100644
--- a/mgmt/Rollback.cc
+++ b/mgmt/Rollback.cc
@@ -107,11 +107,14 @@ Rollback::Rollback(const char *baseFileName, bool root_access_needed_) : configF
         activeVerStr = createPathStr(ACTIVE_VERSION);
 
         if (rename(highestSeenStr, activeVerStr) < 0) {
-          mgmt_log(stderr, "[RollBack::Rollback] Automatic Rollback to prior version failed for %s : %s\n", fileName,
-                   strerror(errno));
+          mgmt_log(stderr, "[RollBack::Rollback] Automatic Rollback to prior "
+                           "version failed for %s : %s\n",
+                   fileName, strerror(errno));
           needZeroLength = true;
         } else {
-          mgmt_log(stderr, "[RollBack::Rollback] Automatic Rollback to version succeded for %s\n", fileName, strerror(errno));
+          mgmt_log(stderr, "[RollBack::Rollback] Automatic Rollback to version "
+                           "succeded for %s\n",
+                   fileName, strerror(errno));
           needZeroLength = false;
           highestSeen--;
           // Since we've made the highestVersion active
@@ -134,8 +137,9 @@ Rollback::Rollback(const char *baseFileName, bool root_access_needed_) : configF
           ats_free(alarmMsg);
           closeFile(fd, true);
         } else {
-          mgmt_fatal(stderr, 0,
-                     "[RollBack::Rollback] Unable to find configuration file %s.\n\tCreation of a placeholder failed : %s\n",
+          mgmt_fatal(stderr, 0, "[RollBack::Rollback] Unable to find "
+                                "configuration file %s.\n\tCreation of a "
+                                "placeholder failed : %s\n",
                      fileName, strerror(errno));
         }
       }
@@ -145,8 +149,9 @@ Rollback::Rollback(const char *baseFileName, bool root_access_needed_) : configF
     } else {
       // If is there but we can not stat it, it is unusable to manager
       //   probably due to permissions problems.  Bail!
-      mgmt_fatal(stderr, 0, "[RollBack::Rollback] Unable to find configuration file %s.\n\tStat failed : %s\n", fileName,
-                 strerror(errno));
+      mgmt_fatal(stderr, 0, "[RollBack::Rollback] Unable to find configuration "
+                            "file %s.\n\tStat failed : %s\n",
+                 fileName, strerror(errno));
     }
   } else {
     fileLastModified = TS_ARCHIVE_STAT_MTIME(fileInfo);
@@ -204,7 +209,6 @@ Rollback::~Rollback()
   ats_free(fileName);
 }
 
-
 // Rollback::createPathStr(version_t version)
 //
 //   CALLEE DELETES STORAGE
@@ -296,7 +300,6 @@ Rollback::closeFile(int fd, bool callSync)
   return result;
 }
 
-
 RollBackCodes
 Rollback::updateVersion(textBuffer *buf, version_t basedOn, version_t newVersion, bool notifyChange, bool incVersion)
 {
@@ -323,7 +326,6 @@ Rollback::updateVersion_ml(textBuffer *buf, version_t basedOn, version_t newVers
   return returnCode;
 }
 
-
 RollBackCodes
 Rollback::forceUpdate(textBuffer *buf, version_t newVersion)
 {
@@ -361,7 +363,6 @@ Rollback::internalUpdate(textBuffer *buf, version_t newVersion, bool notifyChang
   bool failedLink = false;
   char *alarmMsg = NULL;
 
-
   // Check to see if the callee has specified a newVersion number
   //   If the newVersion argument is less than zero, the callee
   //   is telling us to use the next version in sequence
@@ -413,7 +414,8 @@ Rollback::internalUpdate(textBuffer *buf, version_t newVersion, bool notifyChang
     //    install a new file so that we do not go around in
     //    an endless loop
     if (errno == ENOENT) {
-      mgmt_log(stderr, "[Rollback::internalUpdate] The active version of %s was lost.\n\tThe updated copy was installed.\n",
+      mgmt_log(stderr, "[Rollback::internalUpdate] The active version of %s "
+                       "was lost.\n\tThe updated copy was installed.\n",
                fileName);
       failedLink = true;
     } else {
@@ -424,7 +426,9 @@ Rollback::internalUpdate(textBuffer *buf, version_t newVersion, bool notifyChang
 
   if (rename(nextVersion, activeVersion) < 0) {
     mgmt_log(stderr, "[Rollback::internalUpdate] Rename failed : %s\n", strerror(errno));
-    mgmt_log(stderr, "[Rollback::internalUpdate] Unable to create new version of %s.  Using prior version\n", fileName);
+    mgmt_log(stderr, "[Rollback::internalUpdate] Unable to create new version "
+                     "of %s.  Using prior version\n",
+             fileName);
 
     returnCode = SYS_CALL_ERROR_ROLLBACK;
     goto UPDATE_CLEANUP;
@@ -458,7 +462,6 @@ Rollback::internalUpdate(textBuffer *buf, version_t newVersion, bool notifyChang
   this->numVersions++;
   this->currentVersion = newVersion;
 
-
   returnCode = OK_ROLLBACK;
 
   // Post the change to the config file manager
@@ -491,7 +494,6 @@ UPDATE_CLEANUP:
   return returnCode;
 }
 
-
 RollBackCodes
 Rollback::getVersion(version_t version, textBuffer **buffer)
 {
@@ -546,7 +548,8 @@ Rollback::getVersion_ml(version_t version, textBuffer **buffer)
   } while (readResult > 0);
 
   if ((off_t)newBuffer->spaceUsed() != fileInfo.st_size) {
-    mgmt_log(stderr, "[Rollback::getVersion] Incorrect amount of data retrieved from %s version %d.  Expected: %d   Got: %d\n",
+    mgmt_log(stderr, "[Rollback::getVersion] Incorrect amount of data "
+                     "retrieved from %s version %d.  Expected: %d   Got: %d\n",
              fileName, version, fileInfo.st_size, newBuffer->spaceUsed());
     returnCode = SYS_CALL_ERROR_ROLLBACK;
     delete newBuffer;
@@ -639,8 +642,9 @@ Rollback::findVersions_ml(ExpandingArray *listNames)
   dir = opendir(sysconfdir);
 
   if (dir == NULL) {
-    mgmt_log(stderr, "[Rollback::findVersions] Unable to open configuration directory: %s: %s\n", (const char *)sysconfdir,
-             strerror(errno));
+    mgmt_log(stderr, "[Rollback::findVersions] Unable to open configuration "
+                     "directory: %s: %s\n",
+             (const char *)sysconfdir, strerror(errno));
     return INVALID_VERSION;
   }
   // The fun of Solaris - readdir_r requires a buffer passed into it
@@ -757,7 +761,6 @@ Rollback::findVersions_ml(Queue<versionInfo> &q)
   return highest;
 }
 
-
 RollBackCodes
 Rollback::removeVersion(version_t version)
 {
@@ -908,7 +911,9 @@ Rollback::checkForUserUpdate(RollBackCheckType how)
         delete buf;
       }
       if (r != OK_ROLLBACK) {
-        mgmt_log(stderr, "[Rollback::checkForUserUpdate] Failed to roll changed user file %s: %s", fileName, RollbackStrings[r]);
+        mgmt_log(stderr, "[Rollback::checkForUserUpdate] Failed to roll "
+                         "changed user file %s: %s",
+                 fileName, RollbackStrings[r]);
       }
 
       mgmt_log(stderr, "User has changed config file %s\n", fileName);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/Rollback.h
----------------------------------------------------------------------
diff --git a/mgmt/Rollback.h b/mgmt/Rollback.h
index abc6388..b119a81 100644
--- a/mgmt/Rollback.h
+++ b/mgmt/Rollback.h
@@ -124,7 +124,8 @@ struct versionInfo {
 //    returned
 //
 //  findVersions(ExpandingArray* listNames) - scans the config directory for
-//    all versions of the file.  If listNames is not NULL, pointers to versionInfo
+//    all versions of the file.  If listNames is not NULL, pointers to
+// versionInfo
 //    structures are inserted into it.  It is the callee's responsibility
 //    to ats_free the versionInfo structures.  They are allocated by ats_malloc
 //

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/WebMgmtUtils.cc
----------------------------------------------------------------------
diff --git a/mgmt/WebMgmtUtils.cc b/mgmt/WebMgmtUtils.cc
index b6572e2..5ae7686 100644
--- a/mgmt/WebMgmtUtils.cc
+++ b/mgmt/WebMgmtUtils.cc
@@ -36,7 +36,6 @@
  *
  ****************************************************************************/
 
-
 // bool varSetFromStr(const char*, const char* )
 //
 // Sets the named local manager variable from the value string
@@ -286,7 +285,6 @@ varDataFromName(RecDataT varType, const char *varName, RecData *value)
   return (err == REC_ERR_OKAY);
 }
 
-
 // bool varCounterFromName (const char*, RecFloat* )
 //
 //   Sets the *value to value of the varName.
@@ -435,7 +433,6 @@ varIntFromName(const char *varName, RecInt *value)
   return found;
 }
 
-
 // void percentStrFromFloat(MgmtFloat, char* bufVal)
 //
 //  Converts a float to a percent string
@@ -785,7 +782,6 @@ varType(const char *varName)
   return data_type;
 }
 
-
 // InkHashTable* processFormSubmission(char* submission)
 //
 //  A generic way to handle a HTML form submission.
@@ -1011,7 +1007,6 @@ substituteForHTMLChars(const char *buffer)
   return safeBuf;
 }
 
-
 // bool ProxyShutdown()
 //
 //  Attempts to turn the proxy off.  Returns
@@ -1212,8 +1207,9 @@ recordIPCheck(const char *pattern, const char *value)
   //  regex_t regex;
   //  int result;
   bool check;
-  const char *range_pattern =
-    "\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]";
+  const char *range_pattern = "\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+"
+                              "\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+"
+                              "\\-[0-9]+\\]";
   const char *ip_pattern = "[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9]";
 
   Tokenizer dotTok1(".");

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/APITestCliRemote.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/APITestCliRemote.cc b/mgmt/api/APITestCliRemote.cc
index a783e68..f7c68d3 100644
--- a/mgmt/api/APITestCliRemote.cc
+++ b/mgmt/api/APITestCliRemote.cc
@@ -140,7 +140,6 @@ print_err(const char *module, TSMgmtError err)
     TSfree(err_msg);
 }
 
-
 /*--------------------------------------------------------------
  * print_ports
  *--------------------------------------------------------------*/
@@ -222,7 +221,6 @@ print_domain_list(TSDomainList list)
   }
 }
 
-
 void
 print_ip_addr_ele(TSIpAddrEle *ele)
 {
@@ -279,7 +277,6 @@ print_list_of_ip_list(TSList list)
   }
 }
 
-
 /*-------------------------------------------------------
  * print_pd_sspec
  *-------------------------------------------------------*/
@@ -349,7 +346,6 @@ print_pd_sspec(TSPdSsFormat info)
   }
   printf("\n");
 
-
   printf("\tscheme: ");
   switch (info.sec_spec.scheme) {
   case TS_SCHEME_NONE:
@@ -369,7 +365,6 @@ print_pd_sspec(TSPdSsFormat info)
   return;
 }
 
-
 void
 print_cache_ele(TSCacheEle *ele)
 {
@@ -434,13 +429,13 @@ print_cache_ele(TSCacheEle *ele)
 
   /*
      print_pd_sspec(ele->cache_info);
-     printf("Time: %d day, %d hr, %d min, %d sec\n", ele->time_period.d, ele->time_period.h,
+     printf("Time: %d day, %d hr, %d min, %d sec\n", ele->time_period.d,
+     ele->time_period.h,
      ele->time_period.m, ele->time_period.s);
    */
   return;
 }
 
-
 void
 print_hosting_ele(TSHostingEle *ele)
 {
@@ -839,7 +834,6 @@ print_ele_list(TSFileNameT file, TSCfgContext ctx)
   return;
 }
 
-
 /***************************************************************************
  * Control Testing
  ***************************************************************************/
@@ -936,13 +930,15 @@ test_action_need(void)
 
   // RU_NULL record
   TSRecordSetString("proxy.config.proxy_name", "proxy_dorky", &action);
-  printf("[TSRecordSetString] proxy.config.proxy_name \n\tAction Should: [%d]\n\tAction is    : [%d]\n", TS_ACTION_UNDEFINED,
-         action);
+  printf("[TSRecordSetString] proxy.config.proxy_name \n\tAction Should: "
+         "[%d]\n\tAction is    : [%d]\n",
+         TS_ACTION_UNDEFINED, action);
 
   // RU_RESTART_TS record
   TSRecordSetInt("proxy.config.cluster.cluster_port", 6666, &action);
-  printf("[TSRecordSetInt] proxy.config.cluster.cluster_port\n\tAction Should: [%d]\n\tAction is    : [%d]\n", TS_ACTION_RESTART,
-         action);
+  printf("[TSRecordSetInt] proxy.config.cluster.cluster_port\n\tAction Should: "
+         "[%d]\n\tAction is    : [%d]\n",
+         TS_ACTION_RESTART, action);
 }
 
 /* Bouncer the traffic_server process(es) */
@@ -991,12 +987,13 @@ test_error_records()
   ret = TSRecordSetInt("proy.config.cop.core_signal", new_port, &action);
   print_err("TSRecordSetInt", ret);
 
-
   printf("\n");
   if (TSRecordGetCounter("proxy.press.socks.connections_successful", &ctr1) != TS_ERR_OKAY)
     printf("TSRecordGetCounter FAILED!\n");
   else
-    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful=%" PRId64 " \n", ctr1);
+    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful="
+           "%" PRId64 " \n",
+           ctr1);
 
   printf("\n");
   if (TSRecordGetFloat("proxy.conig.http.cache.fuzz.probability", &flt1) != TS_ERR_OKAY)
@@ -1034,7 +1031,6 @@ test_records()
     printf("[TSRecordSetInt] proxy.config.cop.core_signal=%" PRId64 " \n", new_port);
 #endif
 
-
 #if TEST_REC_GET
   TSRecordEle *rec_ele;
   // retrieve a string value record using generic RecordGet
@@ -1048,7 +1044,6 @@ test_records()
   printf("\n\n");
 #endif
 
-
 #if TEST_REC_GET_2
   // retrieve a string value record using generic RecordGet
   rec_ele = TSRecordEleCreate();
@@ -1078,7 +1073,6 @@ test_records()
   else
     printf("[TSRecordSetString] proxy.config.proxy_name=%s\n", new_str);
 
-
   // get
   err = TSRecordGetString("proxy.config.proxy_name", &rec_value);
   if (err != TS_ERR_OKAY)
@@ -1116,17 +1110,23 @@ test_records()
   if (TSRecordGetCounter("proxy.process.socks.connections_successful", &ctr1) != TS_ERR_OKAY)
     printf("TSRecordGetCounter FAILED!\n");
   else
-    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful=%" PRId64 " \n", ctr1);
+    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful="
+           "%" PRId64 " \n",
+           ctr1);
 
   if (TSRecordSetCounter("proxy.process.socks.connections_successful", new_ctr, &action) != TS_ERR_OKAY)
     printf("TSRecordSetCounter FAILED!\n");
   else
-    printf("[TSRecordSetCounter] proxy.process.socks.connections_successful=%" PRId64 " \n", new_ctr);
+    printf("[TSRecordSetCounter] "
+           "proxy.process.socks.connections_successful=%" PRId64 " \n",
+           new_ctr);
 
   if (TSRecordGetCounter("proxy.process.socks.connections_successful", &ctr2) != TS_ERR_OKAY)
     printf("TSRecordGetCounter FAILED!\n");
   else
-    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful=%" PRId64 " \n", ctr2);
+    printf("[TSRecordGetCounter]proxy.process.socks.connections_successful="
+           "%" PRId64 " \n",
+           ctr2);
   printf("\n");
 #endif
 
@@ -1251,7 +1251,6 @@ test_record_get_mlt(void)
   TSStringListEnqueue(name_list, v7);
   TSStringListEnqueue(name_list, v8);
 
-
   num = TSStringListLen(name_list);
   printf("Num Records to Get: %d\n", num);
   ret = TSRecordGetMlt(name_list, rec_list);
@@ -1335,7 +1334,6 @@ test_record_set_mlt(void)
   ele5->rec_type = TS_REC_INT;
   ele5->valueT.int_val = 555;
 
-
   TSListEnqueue(list, ele4);
   TSListEnqueue(list, ele1);
   TSListEnqueue(list, ele2);
@@ -1355,7 +1353,6 @@ test_record_set_mlt(void)
   TSListDestroy(list);
 }
 
-
 /***************************************************************************
  * File I/O Testing
  ***************************************************************************/
@@ -1625,7 +1622,8 @@ test_cfg_context_move(char *args)
 
   // shift all the ele's down so that the next to bottom ele is now top ele
   // move all ele's above the last ele down; bottom ele becomes top ele
-  printf("\nShift all Ele's above second to last ele down; bottom ele becomes top ele\n");
+  printf("\nShift all Ele's above second to last ele down; bottom ele becomes "
+         "top ele\n");
   for (i = count - 3; i >= 0; i--) {
     err = TSCfgContextMoveEleDown(ctx, i);
     if (err != TS_ERR_OKAY) {
@@ -1730,7 +1728,6 @@ test_cfg_context_ops()
   }
   // print_VirtIpAddr_ele_list(ctx);
 
-
   printf("\nMove ele at index %d to botoom of list\n", insert_at);
   for (i = insert_at; i < TSCfgContextGetCount(ctx); i++) {
     err = TSCfgContextMoveEleDown(ctx, i);
@@ -1752,7 +1749,6 @@ test_cfg_context_ops()
   }
   // print_VirtIpAddr_ele_list(ctx);
 
-
   // commit change
   TSCfgContextCommit(ctx, NULL, NULL);
 
@@ -1885,7 +1881,6 @@ test_cfg_socks()
   TSCfgContextDestroy(ctx);
 }
 
-
 /***************************************************************************
  * Events Testing
  ***************************************************************************/
@@ -2137,7 +2132,6 @@ set_stats()
 
   fprintf(stderr, "[set_stats] Set Dummy Stat Values\n");
 
-
   TSRecordSetInt("proxy.process.http.user_agent_response_document_total_size", 100, &action);
   TSRecordSetInt("proxy.process.http.user_agent_response_header_total_size", 100, &action);
   TSRecordSetInt("proxy.process.http.current_client_connections", 100, &action);
@@ -2147,7 +2141,6 @@ set_stats()
   TSRecordSetInt("proxy.process.http.current_server_connections", 100, &action);
   TSRecordSetInt("proxy.process.http.current_server_transactions", 100, &action);
 
-
   TSRecordSetFloat("proxy.node.bandwidth_hit_ratio", 110, &action);
   TSRecordSetFloat("proxy.node.hostdb.hit_ratio", 110, &action);
   TSRecordSetFloat("proxy.node.cache.percent_free", 110, &action);
@@ -2194,7 +2187,6 @@ print_stats()
 
   fprintf(stderr, "[print_stats]\n");
 
-
   TSRecordGetInt("proxy.process.http.user_agent_response_document_total_size", &i1);
   TSRecordGetInt("proxy.process.http.user_agent_response_header_total_size", &i2);
   TSRecordGetInt("proxy.process.http.current_client_connections", &i3);
@@ -2274,12 +2266,14 @@ sync_test()
   TSActionNeedT action;
 
   TSRecordSetString("proxy.config.proxy_name", "dorkface", &action);
-  printf("[TSRecordSetString] proxy.config.proxy_name \n\tAction Should: [%d]\n\tAction is    : [%d]\n", TS_ACTION_UNDEFINED,
-         action);
+  printf("[TSRecordSetString] proxy.config.proxy_name \n\tAction Should: "
+         "[%d]\n\tAction is    : [%d]\n",
+         TS_ACTION_UNDEFINED, action);
 
   TSRecordSetInt("proxy.config.cluster.cluster_port", 3333, &action);
-  printf("[TSRecordSetInt] proxy.config.cluster.cluster_port\n\tAction Should: [%d]\n\tAction is    : [%d]\n", TS_ACTION_RESTART,
-         action);
+  printf("[TSRecordSetInt] proxy.config.cluster.cluster_port\n\tAction Should: "
+         "[%d]\n\tAction is    : [%d]\n",
+         TS_ACTION_RESTART, action);
 
   if (TSRecordSet("proxy.config.http.cache.fuzz.probability", "-0.3333", &action) != TS_ERR_OKAY)
     printf("TSRecordSet FAILED!\n");
@@ -2396,12 +2390,10 @@ runInteractive()
       sync_test();
     }
 
-
   } // end while(1)
 
 } // end runInteractive
 
-
 /* ------------------------------------------------------------------------
  * main
  * ------------------------------------------------------------------------
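
test_action_need() above sets records through the remote management API and prints the TSActionNeedT that the call reports (TS_ACTION_UNDEFINED for a record that needs no action, TS_ACTION_RESTART for proxy.config.cluster.cluster_port). A minimal fragment of the same check, assuming the management API header used by this test harness is included and the remote API has already been initialized:

  // Assumes the mgmt API header included by APITestCliRemote.cc and an
  // already-initialized remote connection, as in the test harness above.
  void
  check_restart_needed()
  {
    TSActionNeedT action;

    // Same record the test sets; the out-parameter reports the required action.
    if (TSRecordSetInt("proxy.config.cluster.cluster_port", 6666, &action) != TS_ERR_OKAY) {
      printf("TSRecordSetInt FAILED!\n");
      return;
    }

    if (action == TS_ACTION_RESTART)
      printf("restart required for the change to take effect\n");
  }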


[5/8] trafficserver git commit: TS-974: Partial Object Caching.

Posted by am...@apache.org.
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CfgContextImpl.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CfgContextImpl.cc b/mgmt/api/CfgContextImpl.cc
index 9a5f0e3..595fb9a 100644
--- a/mgmt/api/CfgContextImpl.cc
+++ b/mgmt/api/CfgContextImpl.cc
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #include "libts.h"
 #include "ink_platform.h"
 
@@ -67,7 +66,6 @@ CommentObj::getCfgEleCopy()
   return (TSCfgEle *)copy_comment_ele(m_ele);
 }
 
-
 //--------------------------------------------------------------------------
 // CacheObj
 //--------------------------------------------------------------------------
@@ -247,7 +245,6 @@ CacheObj::getCfgEleCopy()
   return (TSCfgEle *)copy_cache_ele(m_ele);
 }
 
-
 //--------------------------------------------------------------------------
 // CongestionObj
 //--------------------------------------------------------------------------
@@ -465,7 +462,6 @@ CongestionObj::getCfgEleCopy()
   return (TSCfgEle *)copy_congestion_ele(m_ele);
 }
 
-
 //--------------------------------------------------------------------------
 // HostingObj
 //--------------------------------------------------------------------------
@@ -976,7 +972,6 @@ IpAllowObj::getCfgEleCopy()
   return (TSCfgEle *)copy_ip_allow_ele(m_ele);
 }
 
-
 //--------------------------------------------------------------------------
 // ParentProxyObj
 //--------------------------------------------------------------------------
@@ -1418,7 +1413,6 @@ PluginObj::getCfgEleCopy()
   return (TSCfgEle *)copy_plugin_ele(m_ele);
 }
 
-
 //--------------------------------------------------------------------------
 // RemapObj
 //--------------------------------------------------------------------------
@@ -1449,7 +1443,8 @@ RemapObj::RemapObj(TokenList *tokens)
   if (m_ele->cfg_ele.type == TS_TYPE_UNDEFINED) {
     goto FORMAT_ERR;
   }
-  // The first token must be either "map", "reverse_map", "redirect", or "redirect_temporary"
+  // The first token must be either "map", "reverse_map", "redirect", or
+  // "redirect_temporary"
   token = tokens->first();
 
   // target
@@ -1460,7 +1455,8 @@ RemapObj::RemapObj(TokenList *tokens)
   }
 
   // TODO: Should we check the return value (count) here?
-  fromTok.Initialize(token->name, ALLOW_EMPTY_TOKS); // allow empty token for parse sanity check
+  fromTok.Initialize(token->name, ALLOW_EMPTY_TOKS); // allow empty token for
+                                                     // parse sanity check
 
   if (strcmp(fromTok[0], "http") == 0) {
     m_ele->from_scheme = TS_SCHEME_HTTP;
@@ -1514,7 +1510,8 @@ RemapObj::RemapObj(TokenList *tokens)
   }
 
   // TODO: Should we check the return value (count) here?
-  toTok.Initialize(token->value, ALLOW_EMPTY_TOKS); // allow empty token for parse sanity check
+  toTok.Initialize(token->value, ALLOW_EMPTY_TOKS); // allow empty token for
+                                                    // parse sanity check
 
   if (strcmp(toTok[0], "http") == 0) {
     m_ele->to_scheme = TS_SCHEME_HTTP;
@@ -2196,7 +2193,8 @@ StorageObj::StorageObj(TSStorageEle *ele)
   m_valid = isValid(); // now validate
 }
 
-// must have at least 1 token (token-name = pathname, token-value = size (if any) )
+// must have at least 1 token (token-name = pathname, token-value = size (if
+// any) )
 StorageObj::StorageObj(TokenList *tokens)
 {
   Token *tok;
@@ -2389,7 +2387,6 @@ VirtIpAddrObj::getCfgEleCopy()
   return (TSCfgEle *)copy_virt_ip_addr_ele(m_ele);
 }
 
-
 /*****************************************************************
  * CfgContext
  *****************************************************************/

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CfgContextManager.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CfgContextManager.cc b/mgmt/api/CfgContextManager.cc
index d8a7196..83f4ab5 100644
--- a/mgmt/api/CfgContextManager.cc
+++ b/mgmt/api/CfgContextManager.cc
@@ -141,7 +141,6 @@ CfgContextCommit(CfgContext *ctx, LLQ *errRules)
   return err;
 }
 
-
 /* ---------------------------------------------------------------
  * CfgContextGet
  * ---------------------------------------------------------------
@@ -198,7 +197,6 @@ CfgContextGet(CfgContext *ctx)
   return TS_ERR_OKAY;
 }
 
-
 /***************************************************************
  * CfgContext Operations
  ***************************************************************/
@@ -265,7 +263,6 @@ CfgContextGetObjAt(CfgContext *ctx, int index)
   return NULL; // invalid index
 }
 
-
 /*--------------------------------------------------------------
  * CfgContextGetEleAt
  *--------------------------------------------------------------
@@ -304,7 +301,6 @@ CfgContextGetEleAt(CfgContext *ctx, int index)
   return NULL; // invalid index
 }
 
-
 /*--------------------------------------------------------------
  * CfgContextGetFirst
  *--------------------------------------------------------------
@@ -375,7 +371,6 @@ CfgContextGetNext(CfgContext *ctx, TSCfgIterState *state)
   return NULL; // ERROR
 }
 
-
 /*--------------------------------------------------------------
  * CfgContextMoveEleUp
  *--------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CfgContextManager.h
----------------------------------------------------------------------
diff --git a/mgmt/api/CfgContextManager.h b/mgmt/api/CfgContextManager.h
index bce6fe8..8578af7 100644
--- a/mgmt/api/CfgContextManager.h
+++ b/mgmt/api/CfgContextManager.h
@@ -39,7 +39,6 @@
 #include "CfgContextImpl.h"
 #include "CfgContextDefs.h"
 
-
 /***************************************************************************
  * CfgContext Operations
  ***************************************************************************/
@@ -52,7 +51,6 @@ TSMgmtError CfgContextDestroy(CfgContext *ctx);
 TSMgmtError CfgContextCommit(CfgContext *ctx, LLQ *errRules = NULL);
 TSMgmtError CfgContextGet(CfgContext *ctx);
 
-
 /***************************************************************************
  * CfgContext Operations
  ***************************************************************************/

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CfgContextUtils.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CfgContextUtils.cc b/mgmt/api/CfgContextUtils.cc
index 873a6eb..2452c51 100644
--- a/mgmt/api/CfgContextUtils.cc
+++ b/mgmt/api/CfgContextUtils.cc
@@ -86,7 +86,8 @@ string_to_ip_addr_ele(const char *str)
     ip_a = ats_strdup(const_ip_a);
     ip_b = ats_strdup(const_ip_b);
 
-    // determine if ip's are cidr type; only test if ip_a is cidr, assume both are same
+    // determine if ip's are cidr type; only test if ip_a is cidr, assume both
+    // are same
     cidr_tokens.Initialize(ip_a, COPY_TOKS);
     numTokens = cidr_tokens.count();
     if (numTokens == 1) { // Range, NON-CIDR TYPE
@@ -117,7 +118,6 @@ Lerror:
   return NULL;
 }
 
-
 /* ----------------------------------------------------------------------------
  * ip_addr_ele_to_string
  * ---------------------------------------------------------------------------
@@ -208,7 +208,8 @@ ip_addr_to_string(TSIpAddr ip)
  * string_to_ip_addr
  * ---------------------------------------------------------------------------
  * Converts an ip address in dotted-decimal string format into a TSIpAddr;
- * allocates memory for string. If IP is invalid, then returns TS_INVALID_IP_ADDR.
+ * allocates memory for string. If IP is invalid, then returns
+ * TS_INVALID_IP_ADDR.
  */
 TSIpAddr
 string_to_ip_addr(const char *str)
@@ -559,7 +560,6 @@ int_list_to_string(TSIntList list, const char *delimiter)
   if (list == TS_INVALID_LIST || !delimiter)
     return NULL;
 
-
   numElems = queue_len((LLQ *)list);
 
   memset(buf, 0, MAX_BUF_SIZE);
@@ -616,7 +616,6 @@ Lerror:
   return TS_INVALID_LIST;
 }
 
-
 /* ---------------------------------------------------------------
  * string_to_domain_list
  * ---------------------------------------------------------------
@@ -654,7 +653,6 @@ string_to_domain_list(const char *str_list, const char *delimiter)
   return list;
 }
 
-
 /*----------------------------------------------------------------------------
  * domain_list_to_string
  *----------------------------------------------------------------------------
@@ -958,7 +956,6 @@ pdest_sspec_to_string(TSPrimeDestT pd, char *pd_val, TSSspec *sspec)
   return str;
 }
 
-
 /*----------------------------------------------------------------------------
  * string_to_pdss_format
  *----------------------------------------------------------------------------
@@ -1037,7 +1034,6 @@ Lerror:
   return TS_ERR_FAIL;
 }
 
-
 /*----------------------------------------------------------------------------
  * hms_time_to_string
  *----------------------------------------------------------------------------
@@ -1185,7 +1181,6 @@ Lerror:
   return TS_ERR_FAIL;
 }
 
-
 /*----------------------------------------------------------------------------
  * string_to_header_type
  *----------------------------------------------------------------------------
@@ -1247,7 +1242,6 @@ string_to_scheme_type(const char *scheme)
   return TS_SCHEME_UNDEFINED;
 }
 
-
 char *
 scheme_type_to_string(TSSchemeT scheme)
 {
@@ -1486,7 +1480,6 @@ admin_acc_type_to_string(TSAccessT access)
   return NULL;
 }
 
-
 /***************************************************************************
  * Tokens-to-Struct Conversion Functions
  ***************************************************************************/
@@ -1531,7 +1524,6 @@ tokens_to_pdss_format(TokenList *tokens, Token *first_tok, TSPdSsFormat *pdss)
   }
   pdss->pd_val = ats_strdup(first_tok->value);
 
-
   // iterate through tokens checking for sec specifiers
   // state determines which sec specifier being checked
   // the state is only set if there's a sec spec match
@@ -2104,7 +2096,6 @@ get_rule_type(TokenList *token_list, TSFileNameT file)
   case TS_FNAME_IP_ALLOW: /* ip_allow.config */
     return TS_IP_ALLOW;
 
-
   case TS_FNAME_LOGS_XML: /* logs_xml.config */
     printf(" *** CfgContextUtils.cc: NOT DONE YET! **\n");
     //  TS_LOG_FILTER,             /* logs_xml.config */
@@ -2187,7 +2178,6 @@ copy_cfg_ele(TSCfgEle *src_ele, TSCfgEle *dst_ele)
   dst_ele->error = src_ele->error;
 }
 
-
 void
 copy_sspec(TSSspec *src, TSSspec *dst)
 {
@@ -2452,7 +2442,6 @@ copy_congestion_ele(TSCongestionEle *ele)
   return nele;
 }
 
-
 TSHostingEle *
 copy_hosting_ele(TSHostingEle *ele)
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CfgContextUtils.h
----------------------------------------------------------------------
diff --git a/mgmt/api/CfgContextUtils.h b/mgmt/api/CfgContextUtils.h
index 3fb4de0..e8487f4 100644
--- a/mgmt/api/CfgContextUtils.h
+++ b/mgmt/api/CfgContextUtils.h
@@ -131,7 +131,6 @@ char *admin_acc_type_to_string(TSAccessT access);
  ***************************************************************************/
 Token *tokens_to_pdss_format(TokenList *tokens, Token *first_tok, TSPdSsFormat *pdss);
 
-
 /***************************************************************************
  * Validation Functions
  ***************************************************************************/
@@ -153,7 +152,6 @@ CfgEleObj *create_ele_obj_from_rule_node(Rule *rule);
 CfgEleObj *create_ele_obj_from_ele(TSCfgEle *ele);
 TSRuleTypeT get_rule_type(TokenList *token_list, TSFileNameT file);
 
-
 /***************************************************************************
  * Copy Helper Functions
  ***************************************************************************/

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CoreAPI.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CoreAPI.cc b/mgmt/api/CoreAPI.cc
index 48da1e2..d9bad27 100644
--- a/mgmt/api/CoreAPI.cc
+++ b/mgmt/api/CoreAPI.cc
@@ -268,7 +268,6 @@ threads_for_process(pid_t proc)
     }
   }
 
-
 done:
   if (dir) {
     closedir(dir);
@@ -357,8 +356,10 @@ ServerBacktrace(unsigned /* options */, char **trace)
 {
   *trace = NULL;
 
-  // Unfortunately, we need to be privileged here. We either need to be root or to be holding
-  // the CAP_SYS_PTRACE capability. Even though we are the parent traffic_manager, it is not
+  // Unfortunately, we need to be privileged here. We either need to be root or
+  // to be holding
+  // the CAP_SYS_PTRACE capability. Even though we are the parent
+  // traffic_manager, it is not
   // traceable without privilege because the process credentials do not match.
   ElevateAccess access(true, ElevateAccess::TRACE_PRIVILEGE);
   threadlist threads(threads_for_process(lmgmt->watched_process_pid));
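
The comment above states the privilege requirement but not how to test for it. For reference, a minimal Linux-only check for that requirement (root, or CAP_SYS_PTRACE in the effective set) can be written with libcap (link with -lcap). This is an illustrative sketch, not code from this patch; can_ptrace_other_processes() is a hypothetical name, and ATS itself relies on ElevateAccess as shown in the hunk.

    #include <unistd.h>
    #include <sys/capability.h>

    /* Hypothetical helper: true when the caller may trace other processes. */
    static bool
    can_ptrace_other_processes(void)
    {
      if (geteuid() == 0) {
        return true; /* root is always allowed to trace */
      }

      cap_t caps = cap_get_proc();
      if (caps == NULL) {
        return false;
      }

      cap_flag_value_t value = CAP_CLEAR;
      cap_get_flag(caps, CAP_SYS_PTRACE, CAP_EFFECTIVE, &value);
      cap_free(caps);
      return value == CAP_SET;
    }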

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CoreAPI.h
----------------------------------------------------------------------
diff --git a/mgmt/api/CoreAPI.h b/mgmt/api/CoreAPI.h
index cb5ee88..d8ef854 100644
--- a/mgmt/api/CoreAPI.h
+++ b/mgmt/api/CoreAPI.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #ifndef _CORE_API_H
 #define _CORE_API_H
 
@@ -49,7 +48,8 @@ TSMgmtError ServerBacktrace(unsigned options, char **trace);
 TSMgmtError Reconfigure();                            // TS reread config files
 TSMgmtError Restart(unsigned options);                // restart TM
 TSMgmtError Bounce(unsigned options);                 // restart traffic_server
-TSMgmtError StorageDeviceCmdOffline(const char *dev); // Storage device operation.
+TSMgmtError StorageDeviceCmdOffline(const char *dev); // Storage device
+                                                      // operation.
 
 /***************************************************************************
  * Record Operations

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CoreAPIRemote.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CoreAPIRemote.cc b/mgmt/api/CoreAPIRemote.cc
index 60592ad..0ae4ee5 100644
--- a/mgmt/api/CoreAPIRemote.cc
+++ b/mgmt/api/CoreAPIRemote.cc
@@ -192,7 +192,8 @@ Init(const char *socket_path, TSInitOptionT options)
 
   ts_init_options = options;
 
-  // XXX This should use RecConfigReadRuntimeDir(), but that's not linked into the management
+  // XXX This should use RecConfigReadRuntimeDir(), but that's not linked into
+  // the management
   // libraries. The caller has to pass down the right socket path :(
   if (!socket_path) {
     Layout::create();

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/CoreAPIShared.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/CoreAPIShared.cc b/mgmt/api/CoreAPIShared.cc
index 746dc60..c62f076 100644
--- a/mgmt/api/CoreAPIShared.cc
+++ b/mgmt/api/CoreAPIShared.cc
@@ -92,7 +92,8 @@ readHTTPResponse(int sock, char *buffer, int bufsize, uint64_t timeout)
     //      printf("before poll_read\n");
     err = poll_read(sock, timeout);
     if (err < 0) {
-      //      printf("(test) poll read failed [%d '%s']\n", errno, strerror (errno));
+      //      printf("(test) poll read failed [%d '%s']\n", errno, strerror
+      // (errno));
       goto error;
     } else if (err == 0) {
       //      printf("(test) read timeout\n");
@@ -144,7 +145,8 @@ sendHTTPRequest(int sock, char *req, uint64_t timeout)
 
   int err = poll_write(sock, timeout);
   if (err < 0) {
-    //      printf("(test) poll write failed [%d '%s']\n", errno, strerror (errno));
+    //      printf("(test) poll write failed [%d '%s']\n", errno, strerror
+    // (errno));
     goto error;
   } else if (err == 0) {
     //      printf("(test) write timeout\n");
@@ -158,7 +160,8 @@ sendHTTPRequest(int sock, char *req, uint64_t timeout)
     } while ((err < 0) && ((errno == EINTR) || (errno == EAGAIN)));
 
     if (err < 0) {
-      //      printf("(test) write failed [%d '%s']\n", errno, strerror (errno));
+      //      printf("(test) write failed [%d '%s']\n", errno, strerror
+      // (errno));
       goto error;
     }
     requestPtr += err;
@@ -175,7 +178,6 @@ error: /* "Houston, we have a problem!" (Apollo 13) */
   return TS_ERR_NET_WRITE;
 }
 
-
 /* Modified from TrafficCop.cc (open_socket) */
 int
 connectDirect(const char *host, int port, uint64_t /* timeout ATS_UNUSED */)
@@ -188,7 +190,8 @@ connectDirect(const char *host, int port, uint64_t /* timeout ATS_UNUSED */)
   } while ((sock < 0) && ((errno == EINTR) || (errno == EAGAIN)));
 
   if (sock < 0) {
-    //        printf("(test) unable to create socket [%d '%s']\n", errno, strerror(errno));
+    //        printf("(test) unable to create socket [%d '%s']\n", errno,
+    // strerror(errno));
     goto error;
   }
 
@@ -204,7 +207,8 @@ connectDirect(const char *host, int port, uint64_t /* timeout ATS_UNUSED */)
   } while ((err < 0) && ((errno == EINTR) || (errno == EAGAIN)));
 
   if (err < 0) {
-    //        printf("(test) unable to put socket in non-blocking mode [%d '%s']\n", errno, strerror (errno));
+    //        printf("(test) unable to put socket in non-blocking mode [%d
+    // '%s']\n", errno, strerror (errno));
     goto error;
   }
   // Connect to the specified port on the machine we're running on.
@@ -223,7 +227,8 @@ connectDirect(const char *host, int port, uint64_t /* timeout ATS_UNUSED */)
   } while ((err < 0) && ((errno == EINTR) || (errno == EAGAIN)));
 
   if ((err < 0) && (errno != EINPROGRESS)) {
-    //        printf("(test) unable to connect to server [%d '%s'] at port %d\n", errno, strerror (errno), port);
+    //        printf("(test) unable to connect to server [%d '%s'] at port
+    // %d\n", errno, strerror (errno), port);
     goto error;
   }
   return sock;
@@ -271,7 +276,6 @@ poll_write(int fd, int timeout)
     err = poll(&info, 1, timeout);
   } while ((err < 0) && ((errno == EINTR) || (errno == EAGAIN)));
 
-
   if ((err > 0) && (info.revents & POLLOUT)) {
     return 1;
   }
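
For reference, poll_read() and poll_write() above share one pattern: poll a single descriptor and retry whenever the call is interrupted. A self-contained sketch of that pattern (wait_readable() is a hypothetical name, not a function in this file):

    #include <errno.h>
    #include <poll.h>

    /* Returns 1 when fd is readable, 0 on timeout, -1 on error. */
    static int
    wait_readable(int fd, int timeout_ms)
    {
      struct pollfd info;
      int err;

      info.fd = fd;
      info.events = POLLIN | POLLPRI;
      info.revents = 0;

      do {
        err = poll(&info, 1, timeout_ms);
      } while (err < 0 && (errno == EINTR || errno == EAGAIN));

      if (err > 0 && (info.revents & (POLLIN | POLLPRI))) {
        return 1; /* data is ready */
      }
      return (err == 0) ? 0 : -1; /* timeout or hard error */
    }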

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/EventCallback.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/EventCallback.cc b/mgmt/api/EventCallback.cc
index 4634cf3..894528f 100644
--- a/mgmt/api/EventCallback.cc
+++ b/mgmt/api/EventCallback.cc
@@ -35,7 +35,6 @@
 #include "EventCallback.h"
 #include "CoreAPIShared.h"
 
-
 /**********************************************************************
  * create_event_callback
  *
@@ -92,7 +91,6 @@ create_callback_table(const char *lock_name)
   return cb_table;
 }
 
-
 /**********************************************************************
  * delete_callback_table
  *
@@ -148,7 +146,8 @@ LLQ *
 get_events_with_callbacks(CallbackTable *cb_table)
 {
   LLQ *cb_ev_list;
-  bool all_events = true; // set to false if at least one event doesn't have registered callback
+  bool all_events = true; // set to false if at least one event doesn't have
+                          // registered callback
 
   cb_ev_list = create_queue();
   for (int i = 0; i < NUM_EVENTS; i++) {
@@ -174,7 +173,8 @@ get_events_with_callbacks(CallbackTable *cb_table)
  * purpose: Registers the specified function for the specified event in
  *          the specified callback list
  * input: cb_list - the table of callbacks to store the callback fn
- *        event_name - the event to store the callback for (if NULL, register for all events)
+ *        event_name - the event to store the callback for (if NULL, register
+ * for all events)
  *        func - the callback function
  *        first_cb - true only if this is the event's first callback
  * output: TS_ERR_xx
@@ -236,14 +236,14 @@ cb_table_register(CallbackTable *cb_table, const char *event_name, TSEventSignal
   return TS_ERR_OKAY;
 }
 
-
 /**********************************************************************
  * cb_table_unregister
  *
  * purpose: Unregisters the specified function for the specified event in
  *          the specified callback list
  * input: cb_table - the table of callbacks to store the callback fn
- *        event_name - the event to store the callback for (if NULL, register for all events)
+ *        event_name - the event to store the callback for (if NULL, register
+ * for all events)
  *        func - the callback function
  *        first_cb - true only if this is the event's first callback
  * output: TS_ERR_xx
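
Both register and unregister above follow the same convention: a NULL event name means the operation applies to every event. A compact illustration of that convention, independent of the real CallbackTable types (all names below are hypothetical):

    #include <cstddef>
    #include <vector>

    typedef void (*MiniEventFunc)(const char *name, const char *msg);

    /* event_id < 0 plays the role of a NULL event name: register everywhere. */
    struct MiniCallbackTable {
      std::vector<std::vector<MiniEventFunc> > per_event;

      explicit MiniCallbackTable(std::size_t num_events) : per_event(num_events) {}

      void
      register_cb(int event_id, MiniEventFunc fn)
      {
        if (event_id < 0) { /* NULL event name: register for all events */
          for (std::size_t i = 0; i < per_event.size(); ++i) {
            per_event[i].push_back(fn);
          }
        } else {
          per_event[static_cast<std::size_t>(event_id)].push_back(fn);
        }
      }
    };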

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/EventControlMain.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/EventControlMain.cc b/mgmt/api/EventControlMain.cc
index 2f0ce77..3b46b65 100644
--- a/mgmt/api/EventControlMain.cc
+++ b/mgmt/api/EventControlMain.cc
@@ -308,7 +308,8 @@ event_callback_main(void *arg)
         EventClientT *new_client_con = new_event_client();
 
         if (!new_client_con) {
-          // Debug ("TS_Control_Main", "can't create new EventClientT for new connection\n");
+          // Debug ("TS_Control_Main", "can't create new EventClientT for new
+          // connection\n");
         } else {
           // accept connection
           new_con_fd = mgmt_accept(con_socket_fd, new_client_con->adr, &addr_len);
@@ -320,7 +321,8 @@ event_callback_main(void *arg)
 
       // some other file descriptor; for each one, service request
       if (fds_ready > 0) { // RECEIVED A REQUEST from remote API client
-        // see if there are more fd to set - iterate through all entries in hash table
+        // see if there are more fd to set - iterate through all entries in hash
+        // table
         con_entry = ink_hash_table_iterator_first(accepted_clients, &con_state);
         while (con_entry) {
           client_entry = (EventClientT *)ink_hash_table_entry_value(accepted_clients, con_entry);
@@ -349,7 +351,8 @@ event_callback_main(void *arg)
               continue;
             }
 
-          } // end if(client_entry->fd && FD_ISSET(client_entry->fd, &selectFDs))
+          } // end if(client_entry->fd && FD_ISSET(client_entry->fd,
+          // &selectFDs))
 
           con_entry = ink_hash_table_iterator_next(accepted_clients, &con_state);
         } // end while (con_entry)
@@ -408,7 +411,8 @@ event_callback_main(void *arg)
   // delete tables
   delete_mgmt_events();
 
-  // iterate through hash table; close client socket connections and remove entry
+  // iterate through hash table; close client socket connections and remove
+  // entry
   con_entry = ink_hash_table_iterator_first(accepted_clients, &con_state);
   while (con_entry) {
     client_entry = (EventClientT *)ink_hash_table_entry_value(accepted_clients, con_entry);
@@ -433,7 +437,8 @@ event_callback_main(void *arg)
 /**************************************************************************
  * handle_event_reg_callback
  *
- * purpose: handles request to register a callback for a specific event (or all events)
+ * purpose: handles request to register a callback for a specific event (or all
+ * events)
  * input: client - the client currently reading the msg from
  *        req    - the event_name
  * output: TS_ERR_xx
@@ -451,7 +456,8 @@ handle_event_reg_callback(EventClientT *client, void *req, size_t reqlen)
     goto done;
   }
 
-  // mark the specified alarm as "wanting to be notified" in the client's alarm_registered list
+  // mark the specified alarm as "wanting to be notified" in the client's
+  // alarm_registered list
   if (strlen(name) == 0) { // mark all alarms
     for (int i = 0; i < NUM_EVENTS; i++) {
       client->events_registered[i] = true;
@@ -476,7 +482,8 @@ done:
 /**************************************************************************
  * handle_event_unreg_callback
  *
- * purpose: handles request to unregister a callback for a specific event (or all events)
+ * purpose: handles request to unregister a callback for a specific event (or
+ * all events)
  * input: client - the client currently reading the msg from
  *        req    - the event_name
  * output: TS_ERR_xx
@@ -494,7 +501,8 @@ handle_event_unreg_callback(EventClientT *client, void *req, size_t reqlen)
     goto done;
   }
 
-  // mark the specified alarm as "wanting to be notified" in the client's alarm_registered list
+  // mark the specified alarm as "wanting to be notified" in the client's
+  // alarm_registered list
   if (strlen(name) == 0) { // mark all alarms
     for (int i = 0; i < NUM_EVENTS; i++) {
       client->events_registered[i] = false;
@@ -562,7 +570,8 @@ handle_event_message(EventClientT *client, void *req, size_t reqlen)
     uid_t euid = -1;
     gid_t egid = -1;
 
-    // For now, all event messages require privilege. This is compatible with earlier
+    // For now, all event messages require privilege. This is compatible with
+    // earlier
     // versions of Traffic Server that always required privilege.
     if (mgmt_get_peereid(client->fd, &euid, &egid) == -1 || (euid != 0 && euid != geteuid())) {
       return TS_ERR_PERMISSION_DENIED;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/GenericParser.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/GenericParser.cc b/mgmt/api/GenericParser.cc
index eb97e4f..4d8a85c 100644
--- a/mgmt/api/GenericParser.cc
+++ b/mgmt/api/GenericParser.cc
@@ -223,7 +223,6 @@ Rule::parse(const char *const_rule, TSFileNameT filetype)
   }
 }
 
-
 /**
  * arm_securityParse
  **/
@@ -265,7 +264,6 @@ Rule::arm_securityParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * cacheParse
 * CAUTION: This function is used for a number of similarly formatted
@@ -364,7 +362,6 @@ Rule::cacheParse(char *rule, unsigned short minNumToken, unsigned short maxNumTo
   return m_tokenList;
 }
 
-
 /**
  * congestionParse
  **/
@@ -386,7 +383,6 @@ Rule::hostingParse(char *rule)
   return cacheParse(rule, 2, 2);
 }
 
-
 /**
  * icpParse
  *   - mimic proxy/ICPConfig/icp_config_change_callback
@@ -412,7 +408,6 @@ Rule::icpParse(char *rule, unsigned short minNumToken, unsigned short maxNumToke
     return NULL;
   }
 
-
   m_tokenList = new TokenList();
   for (; tokenStr; tokenStr = ruleTok.iterNext(&ruleTok_state)) {
     token = new Token();
@@ -423,7 +418,6 @@ Rule::icpParse(char *rule, unsigned short minNumToken, unsigned short maxNumToke
   return m_tokenList;
 }
 
-
 /**
  * ip_allowParse
  **/
@@ -436,7 +430,6 @@ Rule::ip_allowParse(char *rule)
   return cacheParse(rule, 2, 2);
 }
 
-
 /**
  * logsParse
  **/
@@ -446,7 +439,6 @@ Rule::logsParse(char * /* rule ATS_UNUSED */)
   return NULL;
 }
 
-
 /**
  * log_hostsParse
  **/
@@ -465,7 +457,6 @@ Rule::log_hostsParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * logs_xmlParse
  **/
@@ -475,7 +466,6 @@ Rule::logs_xmlParse(char * /* rule ATS_UNUSED */)
   return NULL;
 }
 
-
 /**
  * parentParse
  **/
@@ -485,7 +475,6 @@ Rule::parentParse(char *rule)
   return cacheParse(rule, 2);
 }
 
-
 /**
  * volumeParse
  **/
@@ -495,7 +484,6 @@ Rule::volumeParse(char *rule)
   return cacheParse(rule, 3, 3);
 }
 
-
 /**
  * pluginParse
  **/
@@ -518,7 +506,6 @@ Rule::pluginParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * remapParse
  **/
@@ -560,7 +547,6 @@ Rule::remapParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * socksParse
  **/
@@ -575,7 +561,6 @@ Rule::socksParse(char *rule)
   bool insideQuote = false;
   const char *newStr;
 
-
   if (numRuleTok < 2) {
     setErrorHint("Expecting at least 2 space delimited tokens");
     return NULL;
@@ -673,7 +658,6 @@ Rule::socksParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * splitdnsParse
  **/
@@ -759,7 +743,6 @@ Rule::splitdnsParse(char *rule)
   //  return cacheParse(rule, 2);
 }
 
-
 /**
  * updateParse
  **/
@@ -791,7 +774,6 @@ Rule::updateParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * vaddrsParse
  **/
@@ -817,7 +799,6 @@ Rule::vaddrsParse(char *rule)
   return m_tokenList;
 }
 
-
 /**
  * storageParse
  * ------------
@@ -852,7 +833,6 @@ Rule::storageParse(char *rule)
   return m_tokenList;
 }
 
-
 /*
  * bool Rule::inQuote(char *str)
 *   Counts the number of quotes found in "str"
@@ -871,7 +851,6 @@ Rule::inQuote(const char *str)
   return (numQuote & 1);
 }
 
-
 /***************************************************************************
  * RuleList
  *   a rule list is a list of rule; which compose to a configuration file.
@@ -990,7 +969,8 @@ RuleList::parse(char *fileBuf, TSFileNameT filetype)
         rule->setRuleStr(line);
         rule->tokenList = m_tokenList;
       } else {
-        // rule->setComment("## WARNING: The following configuration rule is invalid!");
+        // rule->setComment("## WARNING: The following configuration rule is
+        // invalid!");
         size_t error_rule_size = sizeof(char) * (strlen(line) + strlen("#ERROR: ") + 1);
         char *error_rule = (char *)ats_malloc(error_rule_size);
 
@@ -1009,7 +989,6 @@ RuleList::parse(char *fileBuf, TSFileNameT filetype)
   // this->Print();
 }
 
-
 /***************************************************************************
  * General Routines
  ***************************************************************************/
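
Most of the *Parse() methods above reduce to cacheParse() with a minimum and maximum count of space-delimited tokens (volumeParse(), for instance, requires exactly three). The shape of that check, independent of the Tokenizer and TokenList classes, is roughly the following hypothetical sketch:

    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <vector>

    /* Split a rule on whitespace and accept it only if the token count is in range. */
    static bool
    split_rule(const std::string &rule, std::size_t min_tok, std::size_t max_tok, std::vector<std::string> &out)
    {
      std::istringstream in(rule);
      std::string tok;

      while (in >> tok) {
        out.push_back(tok);
      }
      return out.size() >= min_tok && out.size() <= max_tok;
    }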

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/GenericParser.h
----------------------------------------------------------------------
diff --git a/mgmt/api/GenericParser.h b/mgmt/api/GenericParser.h
index 954681e..5ff7765 100644
--- a/mgmt/api/GenericParser.h
+++ b/mgmt/api/GenericParser.h
@@ -151,7 +151,6 @@ private:
   Queue<Token> m_nameList;
 };
 
-
 /***************************************************************************
  * Rule
  *   a rule is nothing more than just a token list. This object also

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/INKMgmtAPI.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/INKMgmtAPI.cc b/mgmt/api/INKMgmtAPI.cc
index d10bcf8..281aced 100644
--- a/mgmt/api/INKMgmtAPI.cc
+++ b/mgmt/api/INKMgmtAPI.cc
@@ -160,7 +160,8 @@ TSListIsValid(TSList l)
   return true;
 }
 
-/*--- TSIpAddrList operations -------------------------------------------------*/
+/*--- TSIpAddrList operations
+ * -------------------------------------------------*/
 tsapi TSIpAddrList
 TSIpAddrListCreate(void)
 {
@@ -1621,7 +1622,8 @@ TSRecordSetMlt(TSList rec_list, TSActionNeedT *action_need)
     enqueue((LLQ *)rec_list, ele);
   }
 
-  // set the action_need to be the most sever action needed of all the "set" calls
+  // set the action_need to be the most severe action needed of all the "set"
+  // calls
   *action_need = top_action_req;
 
   return status;
@@ -1894,7 +1896,8 @@ TSConfigFileWrite(TSFileNameT file, char *text, int size, int version)
  *         body       - a buffer is allocated on the body char* pointer
  *         bodySize   - the size of the body buffer is returned
  * Output: TSMgmtError   - TS_ERR_OKAY if succeed, TS_ERR_FAIL otherwise
- * Obsolete:  tsapi TSMgmtError TSReadFromUrl (char *url, char **text, int *size);
+ * Obsolete:  tsapi TSMgmtError TSReadFromUrl (char *url, char **text, int
+ * *size);
  * NOTE: The URL can be expressed in the following forms:
  *       - http://www.example.com:80/products/network/index.html
  *       - http://www.example.com/products/network/index.html

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/NetworkMessage.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/NetworkMessage.cc b/mgmt/api/NetworkMessage.cc
index 160bed8..3f6d669 100644
--- a/mgmt/api/NetworkMessage.cc
+++ b/mgmt/api/NetworkMessage.cc
@@ -69,7 +69,8 @@ static const struct NetCmdOperation requests[] = {
   /* RECORD_DESCRIBE_CONFIG     */ {3, {MGMT_MARSHALL_INT, MGMT_MARSHALL_STRING, MGMT_MARSHALL_INT}},
 };
 
-// Responses always begin with a TSMgmtError code, followed by additional fields.
+// Responses always begin with a TSMgmtError code, followed by additional
+// fields.
 static const struct NetCmdOperation responses[] = {
   /* FILE_READ                  */ {3, {MGMT_MARSHALL_INT, MGMT_MARSHALL_INT, MGMT_MARSHALL_DATA}},
   /* FILE_WRITE                 */ {1, {MGMT_MARSHALL_INT}},
@@ -263,8 +264,10 @@ send_mgmt_error(int fd, OpType optype, TSMgmtError error)
   return TS_ERR_FAIL;
 }
 
-// Send a management message response. We don't need to worry about retransmitting the message if we get
-// disconnected, so this is much simpler. We can directly marshall the response as a data object.
+// Send a management message response. We don't need to worry about
+// retransmitting the message if we get
+// disconnected, so this is much simpler. We can directly marshall the response
+// as a data object.
 TSMgmtError
 send_mgmt_response(int fd, OpType optype, ...)
 {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/NetworkMessage.h
----------------------------------------------------------------------
diff --git a/mgmt/api/NetworkMessage.h b/mgmt/api/NetworkMessage.h
index 1aea548..73c9ef4 100644
--- a/mgmt/api/NetworkMessage.h
+++ b/mgmt/api/NetworkMessage.h
@@ -70,7 +70,8 @@ struct mgmt_message_sender {
   virtual ~mgmt_message_sender(){};
 };
 
-// Marshall and send a request, prefixing the message length as a MGMT_MARSHALL_INT.
+// Marshall and send a request, prefixing the message length as a
+// MGMT_MARSHALL_INT.
 TSMgmtError send_mgmt_request(const mgmt_message_sender &snd, OpType optype, ...);
 TSMgmtError send_mgmt_request(int fd, OpType optype, ...);
 
@@ -80,7 +81,8 @@ TSMgmtError send_mgmt_error(int fd, OpType op, TSMgmtError error);
 // Parse a request message from a buffer.
 TSMgmtError recv_mgmt_request(void *buf, size_t buflen, OpType optype, ...);
 
-// Marshall and send a response, prefixing the message length as a MGMT_MARSHALL_INT.
+// Marshall and send a response, prefixing the message length as a
+// MGMT_MARSHALL_INT.
 TSMgmtError send_mgmt_response(int fd, OpType optype, ...);
 
 // Parse a response message from a buffer.
@@ -89,7 +91,8 @@ TSMgmtError recv_mgmt_response(void *buf, size_t buflen, OpType optype, ...);
 // Pull a management message (either request or response) off the wire.
 TSMgmtError recv_mgmt_message(int fd, MgmtMarshallData &msg);
 
-// Extract the first MGMT_MARSHALL_INT from the buffered message. This is the OpType.
+// Extract the first MGMT_MARSHALL_INT from the buffered message. This is the
+// OpType.
 OpType extract_mgmt_request_optype(void *msg, size_t msglen);
 
 #endif /* _NETWORK_MESSAGE_H_ */
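
The declarations above describe the wire format in words: each request or response is a length prefix followed by the marshalled payload. Setting the marshalling aside, the framing itself can be sketched with plain POSIX I/O; the helpers below are hypothetical stand-ins, and a fixed uint32_t stands in for whatever width MGMT_MARSHALL_INT actually marshals to.

    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Read exactly len bytes; returns 0 on success, -1 on EOF/error. */
    static int
    read_exact(int fd, void *buf, size_t len)
    {
      char *p = (char *)buf;
      while (len > 0) {
        ssize_t n = read(fd, p, len);
        if (n <= 0) {
          return -1;
        }
        p += n;
        len -= (size_t)n;
      }
      return 0;
    }

    /* Send one frame: a length prefix followed by the payload. */
    static int
    send_frame(int fd, const void *payload, uint32_t len)
    {
      if (write(fd, &len, sizeof(len)) != (ssize_t)sizeof(len)) {
        return -1;
      }
      return (write(fd, payload, len) == (ssize_t)len) ? 0 : -1;
    }

    /* Receive one frame into a malloc'd buffer the caller must free. */
    static int
    recv_frame(int fd, void **payload, uint32_t *len)
    {
      uint32_t prefix;
      char *buf;

      if (read_exact(fd, &prefix, sizeof(prefix)) != 0) {
        return -1;
      }
      buf = (char *)malloc(prefix);
      if (buf == NULL || read_exact(fd, buf, prefix) != 0) {
        free(buf);
        return -1;
      }
      *payload = buf;
      *len = prefix;
      return 0;
    }

A receiver would call something like recv_frame() and then hand the buffer to the unmarshalling layer, which is roughly the split between recv_mgmt_message() and recv_mgmt_request()/recv_mgmt_response() declared above.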

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/NetworkUtilsLocal.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/NetworkUtilsLocal.cc b/mgmt/api/NetworkUtilsLocal.cc
index 7bd6c81..09a0fda 100644
--- a/mgmt/api/NetworkUtilsLocal.cc
+++ b/mgmt/api/NetworkUtilsLocal.cc
@@ -44,11 +44,13 @@
  * preprocess_msg
  *
  * purpose: reads in all the message; parses the message into header info
- *          (OpType + msg_len) and the request portion (used by the handle_xx fns)
+ *          (OpType + msg_len) and the request portion (used by the handle_xx
+ * fns)
  * input: sock_info - socket msg is read from
  *        msg       - the data from the network message (no OpType or msg_len)
  * output: TS_ERR_xx ( if TS_ERR_OKAY, then parameters set successfully)
- * notes: Since preprocess_msg already removes the OpType and msg_len, this part o
+ * notes: Since preprocess_msg already removes the OpType and msg_len, this part
+ * of
  *        the message is not dealt with by the other parsing functions
  **********************************************************************/
 TSMgmtError

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/NetworkUtilsRemote.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/NetworkUtilsRemote.cc b/mgmt/api/NetworkUtilsRemote.cc
index 5d70366..49717f5 100644
--- a/mgmt/api/NetworkUtilsRemote.cc
+++ b/mgmt/api/NetworkUtilsRemote.cc
@@ -156,7 +156,8 @@ ts_connect()
 #endif
   // connect call
   if (connect(event_socket_fd, (struct sockaddr *)&client_event_sock, sockaddr_len) < 0) {
-    // fprintf(stderr, "[connect] ERROR (event_socket_fd %d): %s\n", event_socket_fd, strerror(int(errno)));
+    // fprintf(stderr, "[connect] ERROR (event_socket_fd %d): %s\n",
+    // event_socket_fd, strerror(int(errno)));
     close(event_socket_fd);
     close(main_socket_fd);
     event_socket_fd = -1;
@@ -173,7 +174,8 @@ ERROR:
 /***************************************************************************
  * disconnect
  *
- * purpose: disconnect from traffic server; closes sockets and resets their values
+ * purpose: disconnect from traffic server; closes sockets and resets their
+ * values
  * input: None
  * output: TS_ERR_FAIL, TS_ERR_OKAY
  * notes: doesn't do clean up - all cleanup should be done before here
@@ -264,13 +266,15 @@ reconnect_loop(int num_attempts)
     numTries++;
     err = reconnect();
     if (err == TS_ERR_OKAY) {
-      // fprintf(stderr, "[reconnect_loop] Successful reconnction; Leave loop\n");
+      // fprintf(stderr, "[reconnect_loop] Successful reconnection; Leave
+      // loop\n");
       return TS_ERR_OKAY; // successful connection
     }
     sleep(1); // to make it slower
   }
 
-  // fprintf(stderr, "[reconnect_loop] FAIL TO CONNECT after %d tries\n", num_attempts);
+  // fprintf(stderr, "[reconnect_loop] FAIL TO CONNECT after %d tries\n",
+  // num_attempts);
   return err; // unsuccessful connection after num_attempts
 }
 
@@ -641,7 +645,8 @@ event_poll_thread_main(void *arg)
     event->id = get_event_id(name);
     event->description = desc;
 
-    // got event notice; spawn new thread to handle the event's callback functions
+    // got event notice; spawn new thread to handle the event's callback
+    // functions
     ink_thread_create(event_callback_thread, (void *)event);
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/NetworkUtilsRemote.h
----------------------------------------------------------------------
diff --git a/mgmt/api/NetworkUtilsRemote.h b/mgmt/api/NetworkUtilsRemote.h
index 5c63c8e..4300490 100644
--- a/mgmt/api/NetworkUtilsRemote.h
+++ b/mgmt/api/NetworkUtilsRemote.h
@@ -56,7 +56,8 @@ void set_socket_paths(const char *path);
  * the client connection information stored in the variables in
  * NetworkUtilsRemote.cc
  */
-TSMgmtError ts_connect(); /* TODO: update documenation, Renamed due to conflict with connect() in <sys/socket.h> on some platforms*/
+TSMgmtError ts_connect(); /* TODO: update documentation; renamed due to conflict with
+                             connect() in <sys/socket.h> on some platforms*/
 TSMgmtError disconnect();
 TSMgmtError reconnect();
 TSMgmtError reconnect_loop(int num_attempts);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/api/TSControlMain.cc
----------------------------------------------------------------------
diff --git a/mgmt/api/TSControlMain.cc b/mgmt/api/TSControlMain.cc
index a771ca3..eaf14c8 100644
--- a/mgmt/api/TSControlMain.cc
+++ b/mgmt/api/TSControlMain.cc
@@ -150,7 +150,8 @@ ts_ctrl_main(void *arg)
 
     if (con_socket_fd >= 0) {
       FD_SET(con_socket_fd, &selectFDs);
-      // Debug("ts_main", "[ts_ctrl_main] add fd %d to select set\n", con_socket_fd);
+      // Debug("ts_main", "[ts_ctrl_main] add fd %d to select set\n",
+      // con_socket_fd);
     }
     // see if there are more fd to set
     con_entry = ink_hash_table_iterator_first(accepted_con, &con_state);
@@ -189,7 +190,8 @@ ts_ctrl_main(void *arg)
 
       // some other file descriptor; for each one, service request
       if (fds_ready > 0) { // RECEIVED A REQUEST from remote API client
-        // see if there are more fd to set - iterate through all entries in hash table
+        // see if there are more fd to set - iterate through all entries in hash
+        // table
         con_entry = ink_hash_table_iterator_first(accepted_con, &con_state);
         while (con_entry) {
           Debug("ts_main", "[ts_ctrl_main] We have a remote client request!\n");
@@ -222,7 +224,8 @@ ts_ctrl_main(void *arg)
               continue;
             }
 
-          } // end if(client_entry->fd && FD_ISSET(client_entry->fd, &selectFDs))
+          } // end if(client_entry->fd && FD_ISSET(client_entry->fd,
+          // &selectFDs))
 
           con_entry = ink_hash_table_iterator_next(accepted_con, &con_state);
         } // end while (con_entry)
@@ -236,7 +239,8 @@ ts_ctrl_main(void *arg)
   Debug("ts_main", "[ts_ctrl_main] CLOSING AND SHUTTING DOWN OPERATIONS\n");
   close_socket(con_socket_fd);
 
-  // iterate through hash table; close client socket connections and remove entry
+  // iterate through hash table; close client socket connections and remove
+  // entry
   con_entry = ink_hash_table_iterator_first(accepted_con, &con_state);
   while (con_entry) {
     client_entry = (ClientT *)ink_hash_table_entry_value(accepted_con, con_entry);
@@ -395,7 +399,8 @@ send_record_match(RecT /* rec_type */, void *edata, int /* registered */, const
       match->err = send_record_get_response(match->fd, TS_REC_COUNTER, name, &(rec_val->rec_counter), sizeof(TSCounter));
       break;
     case RECD_STRING:
-      // For NULL string parameters, end the literal "NULL" to match the behavior of MgmtRecordGet(). Make sure to send
+      // For NULL string parameters, send the literal "NULL" to match the
+      // behavior of MgmtRecordGet(). Make sure to send
       // the trailing NULL.
       if (rec_val->rec_string) {
         match->err = send_record_get_response(match->fd, TS_REC_STRING, name, rec_val->rec_string, strlen(rec_val->rec_string) + 1);
@@ -1164,7 +1169,8 @@ handle_control_message(int fd, void *req, size_t reqlen)
     uid_t euid = -1;
     gid_t egid = -1;
 
-    // For privileged calls, ensure we have caller credentials and that the caller is privileged.
+    // For privileged calls, ensure we have caller credentials and that the
+    // caller is privileged.
     if (handlers[optype].flags & MGMT_API_PRIVILEGED) {
       if (mgmt_get_peereid(fd, &euid, &egid) == -1 || (euid != 0 && euid != geteuid())) {
         Debug("ts_main", "denied privileged API access on fd=%d for uid=%d gid=%d", fd, euid, egid);
@@ -1177,8 +1183,10 @@ handle_control_message(int fd, void *req, size_t reqlen)
 
   error = handlers[optype].handler(fd, req, reqlen);
   if (error != TS_ERR_OKAY) {
-    // NOTE: if the error was produced by the handler sending a response, this could attempt to
-    // send a response again. However, this would only happen if sending the response failed, so
+    // NOTE: if the error was produced by the handler sending a response, this
+    // could attempt to
+    // send a response again. However, this would only happen if sending the
+    // response failed, so
     // it is safe to fail to send it again here ...
     return send_mgmt_error(fd, optype, error);
   }
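
The privileged-call check above needs the peer's effective uid on the management socket. On Linux that can be read with the SO_PEERCRED socket option; the sketch below is a hypothetical stand-in for mgmt_get_peereid(), which is the portable helper the code actually calls.

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE 1 /* struct ucred on glibc */
    #endif
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Fetch the effective uid/gid of the peer on a connected Unix domain socket. */
    static int
    get_peer_euid(int fd, uid_t *euid, gid_t *egid)
    {
      struct ucred cred;
      socklen_t len = sizeof(cred);

      if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) == -1) {
        return -1;
      }
      *euid = cred.uid;
      *egid = cred.gid;
      return 0;
    }

With the uid in hand, the test is the one visible in the hunk: the call is allowed when euid is 0 or matches geteuid().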

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/cluster/ClusterCom.cc
----------------------------------------------------------------------
diff --git a/mgmt/cluster/ClusterCom.cc b/mgmt/cluster/ClusterCom.cc
index 5ec30ed..4734407 100644
--- a/mgmt/cluster/ClusterCom.cc
+++ b/mgmt/cluster/ClusterCom.cc
@@ -68,8 +68,10 @@ drainIncomingChannel_broadcast(void *arg)
   for (;;) { /* Loop draining mgmt network channels */
     int nevents = 0;
 
-    // It's not clear whether this can happen, but historically, this code was written as if it
-    // could. A hacky little sleep here will prevent this thread spinning on the read timeout.
+    // It's not clear whether this can happen, but historically, this code was
+    // written as if it
+    // could. A hacky little sleep here will prevent this thread spinning on the
+    // read timeout.
     if (ccom->cluster_type == NO_CLUSTER || ccom->receive_fd == ts::NO_FD) {
       mgmt_sleep_sec(1);
     }
@@ -157,8 +159,10 @@ drainIncomingChannel(void *arg)
   for (;;) { /* Loop draining mgmt network channels */
     ink_zero(message);
 
-    // It's not clear whether this can happen, but historically, this code was written as if it
-    // could. A hacky little sleep here will prevent this thread spinning on the read timeout.
+    // It's not clear whether this can happen, but historically, this code was
+    // written as if it
+    // could. A hacky little sleep here will prevent this thread spinning on the
+    // read timeout.
     if (ccom->cluster_type == NO_CLUSTER || ccom->reliable_server_fd == ts::NO_FD) {
       mgmt_sleep_sec(1);
     }
@@ -288,20 +292,25 @@ drainIncomingChannel(void *arg)
           if (buff)
             delete buff;
         } else if (strstr(message, "cmd: shutdown_manager")) {
-          mgmt_log("[ClusterCom::drainIncomingChannel] Received manager shutdown request\n");
+          mgmt_log("[ClusterCom::drainIncomingChannel] Received manager "
+                   "shutdown request\n");
           lmgmt->mgmt_shutdown_outstanding = MGMT_PENDING_RESTART;
         } else if (strstr(message, "cmd: shutdown_process")) {
-          mgmt_log("[ClusterCom::drainIncomingChannel] Received process shutdown request\n");
+          mgmt_log("[ClusterCom::drainIncomingChannel] Received process "
+                   "shutdown request\n");
           lmgmt->processShutdown();
         } else if (strstr(message, "cmd: restart_process")) {
-          mgmt_log("[ClusterCom::drainIncomingChannel] Received restart process request\n");
+          mgmt_log("[ClusterCom::drainIncomingChannel] Received restart "
+                   "process request\n");
           lmgmt->processRestart();
         } else if (strstr(message, "cmd: bounce_process")) {
-          mgmt_log("[ClusterCom::drainIncomingChannel] Received bounce process request\n");
+          mgmt_log("[ClusterCom::drainIncomingChannel] Received bounce process "
+                   "request\n");
           lmgmt->processBounce();
         } else if (strstr(message, "cmd: clear_stats")) {
           char sname[1024];
-          mgmt_log("[ClusterCom::drainIncomingChannel] Received clear stats request\n");
+          mgmt_log("[ClusterCom::drainIncomingChannel] Received clear stats "
+                   "request\n");
           if (sscanf(message, "cmd: clear_stats %1023s", sname) != 1) {
             lmgmt->clearStats(sname);
           } else {
@@ -322,7 +331,6 @@ drainIncomingChannel(void *arg)
   return NULL;
 } /* End drainIncomingChannel */
 
-
 /*
  * cluster_com_port_watcher(...)
  *   This function watches updates and changes that are made to the
@@ -343,7 +351,6 @@ cluster_com_port_watcher(const char *name, RecDataT /* data_type ATS_UNUSED */,
   return 0;
 } /* End cluster_com_port_watcher */
 
-
 ClusterCom::ClusterCom(unsigned long oip, char *host, int mcport, char *group, int rsport, char *p)
   : our_wall_clock(0), alive_peers_count(0), reliable_server_fd(0), broadcast_fd(0), receive_fd(0)
 {
@@ -354,7 +361,8 @@ ClusterCom::ClusterCom(unsigned long oip, char *host, int mcport, char *group, i
   if (strlen(host) >= 1024) {
     mgmt_fatal(stderr, 0, "[ClusterCom::ClusterCom] Hostname too large: %s\n", host);
   }
-  // the constructor does a memset() on broadcast_addr and receive_addr, initializing them
+  // the constructor does a memset() on broadcast_addr and receive_addr,
+  // initializing them
   // coverity[uninit_member]
   memset(&broadcast_addr, 0, sizeof(broadcast_addr));
   memset(&receive_addr, 0, sizeof(receive_addr));
@@ -390,7 +398,8 @@ ClusterCom::ClusterCom(unsigned long oip, char *host, int mcport, char *group, i
   found = (rec_err == REC_ERR_OKAY);
 
   if (!found) {
-    mgmt_fatal(stderr, 0, "[ClusterCom::ClusterCom] no cluster_configuration filename configured\n");
+    mgmt_fatal(stderr, 0, "[ClusterCom::ClusterCom] no cluster_configuration "
+                          "filename configured\n");
   }
 
   if (strlen(p) + strlen(cluster_file) >= 1024) {
@@ -467,7 +476,6 @@ ClusterCom::ClusterCom(unsigned long oip, char *host, int mcport, char *group, i
   return;
 } /* End ClusterCom::ClusterCom */
 
-
 /*
  * checkPeers(...)
  *   Function checks on our peers by racing through the peer list(ht) and
@@ -482,8 +490,10 @@ ClusterCom::checkPeers(time_t *ticker)
   InkHashTableEntry *entry;
   InkHashTableIteratorState iterator_state;
 
-  // Hack in the file manager in case the rollback needs to send a notification. This is definitely
-  // a hack, but it helps break the dependency on global FileManager in traffic_manager.
+  // Hack in the file manager in case the rollback needs to send a notification.
+  // This is definitely
+  // a hack, but it helps break the dependency on global FileManager in
+  // traffic_manager.
   cluster_file_rb->configFiles = configFiles;
 
   if (cluster_type == NO_CLUSTER)
@@ -585,7 +595,6 @@ ClusterCom::checkPeers(time_t *ticker)
           }
         }
 
-
         if (num_peers == number_of_nodes) {
           /*
            * If the number of peers in the hash_table is the same as the
@@ -629,9 +638,11 @@ ClusterCom::checkPeers(time_t *ticker)
     if (signal_alarm) {
       /*
       lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_PROXY_SYSTEM_ERROR,
-                                       "[TrafficManager] Unable to write cluster.config, membership unchanged");
+                                       "[TrafficManager] Unable to write
+      cluster.config, membership unchanged");
       */
-      mgmt_elog(0, "[TrafficManager] Unable to write cluster.config, membership unchanged");
+      mgmt_elog(0, "[TrafficManager] Unable to write cluster.config, "
+                   "membership unchanged");
     }
     *ticker = t;
   }
@@ -658,7 +669,6 @@ ClusterCom::checkPeers(time_t *ticker)
   return;
 } /* End ClusterCom::checkPeers */
 
-
 void
 ClusterCom::generateClusterDelta(void)
 {
@@ -691,7 +701,6 @@ ClusterCom::generateClusterDelta(void)
 
 } /* End ClusterCom::generateClusterDelta */
 
-
 /*
 * handleMultiCastMessage(...)
  *   Function is called to handle(parse) messages received from the broadcast
@@ -778,7 +787,8 @@ ClusterCom::handleMultiCastMessage(char *message)
   if (!strstr(line, "os: ") || !strstr(line, sys_name)) {
     /*
     lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_PROXY_SYSTEM_ERROR,
-                                     "Received Multicast message from peer running mis-match"
+                                     "Received Multicast message from peer
+    running mis-match"
                                      " Operating system, please investigate");
     */
     Debug("ccom", "[ClusterCom::handleMultiCastMessage] Received message from peer "
@@ -791,8 +801,10 @@ ClusterCom::handleMultiCastMessage(char *message)
   if (!strstr(line, "rel: ") || !strstr(line, sys_release)) {
     /*
     lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_PROXY_SYSTEM_ERROR,
-                                     "Received Multicast message from peer running mis-match"
-                                     " Operating system release, please investigate");
+                                     "Received Multicast message from peer
+    running mis-match"
+                                     " Operating system release, please
+    investigate");
     */
     Debug("ccom", "[ClusterCom::handleMultiCastMessage] Received message from peer "
                   "running different os/release '%s'(ours os: '%s' rel: '%s'\n",
@@ -838,8 +850,10 @@ ClusterCom::handleMultiCastMessage(char *message)
     p->num_virt_addrs = 0;
 
     // Safe since these are completely static
-    // TODO: This might no longer be completely optimal, since we don't keep track of
-    // how many RECT_NODE stats there are. I'm hoping it's negligible though, but worst
+    // TODO: This might no longer be completely optimal, since we don't keep
+    // track of
+    // how many RECT_NODE stats there are. I'm hoping it's negligible though,
+    // but worst
     // case we can reoptimize this later (and more efficiently).
     int cnt = 0;
     p->node_rec_data.recs = (RecRecord *)ats_malloc(sizeof(RecRecord) * g_num_records);
@@ -899,7 +913,6 @@ Lbogus:
   }
 } /* End ClusterCom::handleMultiCastMessage */
 
-
 /*
  * handleMultiCastStatPacket(...)
  *   Function groks the stat packets received on the mc channel and updates
@@ -935,7 +948,9 @@ ClusterCom::handleMultiCastStatPacket(char *last, ClusterPeerInfo *peer)
           tmp_msg_val = ink_atoi64(v3 + 1);
       }
       if (!v2 || !v3) {
-        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid "
+                     "message-line(%d) '%s'\n",
+                  __LINE__, line);
         return;
       }
       ink_assert(i == tmp_id && rec->data_type == tmp_type);
@@ -956,7 +971,9 @@ ClusterCom::handleMultiCastStatPacket(char *last, ClusterPeerInfo *peer)
       // the types specified are all have a defined constant size
       // coverity[secure_coding]
       if (sscanf(line, "%d:%d: %f", &tmp_id, (int *)&tmp_type, &tmp_msg_val) != 3) {
-        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid "
+                     "message-line(%d) '%s'\n",
+                  __LINE__, line);
         return;
       }
       ink_assert(i == tmp_id && rec->data_type == tmp_type);
@@ -975,7 +992,9 @@ ClusterCom::handleMultiCastStatPacket(char *last, ClusterPeerInfo *peer)
       // the types specified are all have a defined constant size
       // coverity[secure_coding]
       if (sscanf(line, "%d:%d: %n", &tmp_id, (int *)&tmp_type, &ccons) != 2) {
-        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+        mgmt_elog(0, "[ClusterCom::handleMultiCastStatPacket] Invalid "
+                     "message-line(%d) '%s'\n",
+                  __LINE__, line);
         return;
       }
       tmp_msg_val = &line[ccons];
@@ -1137,7 +1156,9 @@ ClusterCom::handleMultiCastFilePacket(char *last, char *ip)
     file_update_failure = false;
     // coverity[secure_coding]
     if (sscanf(line, "%1023s %d %" PRId64 "\n", file, &ver, &tt) != 3) {
-      mgmt_elog(0, "[ClusterCom::handleMultiCastFilePacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+      mgmt_elog(0, "[ClusterCom::handleMultiCastFilePacket] Invalid "
+                   "message-line(%d) '%s'\n",
+                __LINE__, line);
       return;
     }
 
@@ -1145,15 +1166,15 @@ ClusterCom::handleMultiCastFilePacket(char *last, char *ip)
       our_ver = rb->getCurrentVersion();
       if (ver > our_ver) { /* Their version is newer */
                            /*
-                            * FIX: we have the timestamp from them as well, should we also
-                            * figure that into this? or are version numbers sufficient?
-                            *
-                            * (mod > rb->versionTimeStamp(our_ver)
-                            *
-                            * When fixing this, watch out for the workaround put in place
-                            * for INKqa08567.  File timestamps aren't sent around the
-                            * cluster anymore.
-                            */
+         * FIX: we have the timestamp from them as well, should we also
+         * figure that into this? or are version numbers sufficient?
+         *
+         * (mod > rb->versionTimeStamp(our_ver)
+         *
+         * When fixing this, watch out for the workaround put in place
+         * for INKqa08567.  File timestamps aren't sent around the
+         * cluster anymore.
+         */
         char message[1024];
         textBuffer *reply = new textBuffer(2048); /* Start with 2k file size */
         snprintf(message, sizeof(message), "file: %s %d", file, ver);
@@ -1230,7 +1251,6 @@ ClusterCom::handleMultiCastFilePacket(char *last, char *ip)
   return;
 } /* End ClusterCom::handleMultiCastFilePacket */
 
-
 /*
  * handleMultiCastAlarmPacket(...)
  *   Function receives incoming alarm messages and updates the alarms class.
@@ -1253,7 +1273,9 @@ ClusterCom::handleMultiCastAlarmPacket(char *last, char *ip)
     // both types have a finite size
     // coverity[secure_coding]
     if (sscanf(line, "alarm: %d %n", &a, &ccons) != 1) {
-      mgmt_elog(0, "[ClusterCom::handleMultiCastAlarmPacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+      mgmt_elog(0, "[ClusterCom::handleMultiCastAlarmPacket] Invalid "
+                   "message-line(%d) '%s'\n",
+                __LINE__, line);
       return;
     }
 
@@ -1267,7 +1289,6 @@ ClusterCom::handleMultiCastAlarmPacket(char *last, char *ip)
   return;
 } /* End ClusterCom::handleMultiCastAlarmPacket */
 
-
 /*
  * handleMultiCastVMapPacket(...)
  *   Handles incoming reports from peers about which virtual interfaces
@@ -1292,7 +1313,9 @@ ClusterCom::handleMultiCastVMapPacket(char *last, char *ip)
     }
     // coverity[secure_coding]
     if (sscanf(line, "virt: %79s", vaddr) != 1) {
-      mgmt_elog(0, "[ClusterCom::handleMultiCastVMapPacket] Invalid message-line(%d) '%s'\n", __LINE__, line);
+      mgmt_elog(0, "[ClusterCom::handleMultiCastVMapPacket] Invalid "
+                   "message-line(%d) '%s'\n",
+                __LINE__, line);
       return;
     }
 
@@ -1309,7 +1332,6 @@ ClusterCom::handleMultiCastVMapPacket(char *last, char *ip)
   return;
 } /* End ClusterCom::handleMultiCastVMapPacket */
 
-
 /*
  * sendSharedData
  *   Function serves as aggregator of NODE data to be shared with the
@@ -1380,7 +1402,6 @@ ClusterCom::sendSharedData(bool send_proxy_heart_beat)
   return true;
 } /* End ClusterCom::sendSharedData */
 
-
 /*
  * constructSharedGenericPacket(...)
  *   A generic packet builder that can construct config or stat
@@ -1415,7 +1436,6 @@ ClusterCom::constructSharedGenericPacket(char *message, int max, RecT packet_typ
   running_sum += strlen(tmp);
   ink_release_assert(running_sum < max);
 
-
   if (sys_release[0]) {
     snprintf(tmp, sizeof(tmp), "rel: %s\n", sys_release);
   } else {
@@ -1491,7 +1511,6 @@ ClusterCom::constructSharedGenericPacket(char *message, int max, RecT packet_typ
   return;
 } /* End ClusterCom::constructSharedGenericPacket */
 
-
 void
 ClusterCom::constructSharedStatPacket(char *message, int max)
 {
@@ -1499,7 +1518,6 @@ ClusterCom::constructSharedStatPacket(char *message, int max)
   return;
 } /* End ClusterCom::constructSharedStatPacket */
 
-
 /* static int constructSharedPacketHeader(...)
  *   Each multicast packet needs to have the following
  *   header info.  Ip, Cluster Name, TS Version.  This function
@@ -1520,7 +1538,6 @@ ClusterCom::constructSharedPacketHeader(const AppVersionInfo &version, char *mes
   return running_sum;
 } /* End ClusterCom::constructSharedPacketHeader */
 
-
 /*
  * constructSharedFilePacket(...)
  *   Foreach of the config files we are holding build a packet that
@@ -1590,7 +1607,6 @@ ClusterCom::constructSharedFilePacket(char *message, int max)
   return;
 } /* End ClusterCom::constructSharedFilePacket */
 
-
 /*
 * establishChannels(...)
  *   Sets up the multi-cast and reliable tcp channels for cluster
@@ -1639,7 +1655,6 @@ ClusterCom::establishChannels()
   return;
 }
 
-
 /*
  * establishBroadcastChannel()
  *   Setup our multicast channel for broadcasting.
@@ -1652,12 +1667,14 @@ ClusterCom::establishBroadcastChannel(void)
   }
 
   if (fcntl(broadcast_fd, F_SETFD, 1) < 0) {
-    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to set close-on-exec.\n");
+    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to set "
+                      "close-on-exec.\n");
   }
 
   int one = 1;
   if (setsockopt(broadcast_fd, SOL_SOCKET, SO_REUSEADDR, (const char *)&one, sizeof(one)) < 0) {
-    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to set socket options.\n");
+    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to set "
+                      "socket options.\n");
   }
 
   memset(&broadcast_addr, 0, sizeof(broadcast_addr));
@@ -1669,7 +1686,8 @@ ClusterCom::establishBroadcastChannel(void)
 
   /* Set ttl(max forwards), 1 should be default(same subnetwork). */
   if (setsockopt(broadcast_fd, IPPROTO_IP, IP_MULTICAST_TTL, (const char *)&ttl, sizeof(ttl)) < 0) {
-    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to setsocketopt, ttl\n");
+    mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to "
+                      "setsocketopt, ttl\n");
   }
 
   /* Disable broadcast loopback, that is broadcasting to self */
@@ -1680,7 +1698,6 @@ ClusterCom::establishBroadcastChannel(void)
   return;
 } /* End ClusterCom::establishBroadcastChannel */
 
-
 /*
  * establishReceiveChannel()
  *   Setup our multicast channel for receiving incoming broadcasts
@@ -1716,7 +1733,8 @@ ClusterCom::establishReceiveChannel(int fatal_on_error)
       Debug("ccom", "establishReceiveChannel: Unable to set socket to reuse addr");
       return 1;
     }
-    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Unable to set socket to reuse addr.\n");
+    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Unable to set "
+                      "socket to reuse addr.\n");
   }
 
   memset(&receive_addr, 0, sizeof(receive_addr));
@@ -1731,7 +1749,9 @@ ClusterCom::establishReceiveChannel(int fatal_on_error)
       Debug("ccom", "establishReceiveChannel: Unable to bind to socket, port %d", mc_port);
       return 1;
     }
-    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Unable to bind to socket, port %d\n", mc_port);
+    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Unable to bind to "
+                      "socket, port %d\n",
+               mc_port);
   }
   /* Add ourselves to the group */
   struct ip_mreq mc_request;
@@ -1745,13 +1765,14 @@ ClusterCom::establishReceiveChannel(int fatal_on_error)
 
       return 1;
     }
-    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Can't add ourselves to multicast group %s\n", mc_group);
+    mgmt_fatal(errno, "[ClusterCom::establishReceiveChannel] Can't add "
+                      "ourselves to multicast group %s\n",
+               mc_group);
   }
 
   return 0;
 } /* End ClusterCom::establishReceiveChannel */
 
-
 /*
  * sendOutgoingMessage
  *   Function basically writes a message to the broadcast_fd, it is blocking,
@@ -1768,7 +1789,6 @@ ClusterCom::sendOutgoingMessage(char *buf, int len)
   return true;
 } /* End ClusterCom::sendOutgoingMessage */
 
-
 bool
 ClusterCom::sendClusterMessage(int msg_type, const char *args)
 {
@@ -1809,7 +1829,8 @@ ClusterCom::sendClusterMessage(int msg_type, const char *args)
 
     tmp_ret = rl_sendReliableMessage(tmp->inet_address, msg, strlen(msg));
     if (tmp->num_virt_addrs != -1) {
-      /* Only change return val if he is not dead, if dead manager could be up. */
+      /* Only change return val if it is not dead; if dead, the manager could be up.
+       */
       ret = tmp_ret;
     }
   }
@@ -1836,7 +1857,6 @@ ClusterCom::sendClusterMessage(int msg_type, const char *args)
   return ret;
 } /* End ClusterCom::sendClusterMessage */
 
-
 bool
 ClusterCom::sendReliableMessage(unsigned long addr, char *buf, int len)
 {
@@ -1873,7 +1893,6 @@ ClusterCom::rl_sendReliableMessage(unsigned long addr, const char *buf, int len)
   serv_addr.sin_addr.s_addr = addr;
   serv_addr.sin_port = htons(cport);
 
-
   if ((fd = mgmt_socket(AF_INET, SOCK_STREAM, 0)) < 0) {
     mgmt_elog(errno, "[ClusterCom::rl_sendReliableMessage] Unable to create socket\n");
     return false;
@@ -1899,7 +1918,6 @@ ClusterCom::rl_sendReliableMessage(unsigned long addr, const char *buf, int len)
   return true;
 } /* End ClusterCom::rl_sendReliableMessage */
 
-
 /*
  * sendReliableMessage(...)
  *   Used to send a string across the reliable fd.
@@ -1983,7 +2001,6 @@ ClusterCom::sendReliableMessage(unsigned long addr, char *buf, int len, char *re
   return true;
 } /* End ClusterCom::sendReliableMessage */
 
-
 /*
  * sendReliableMessage(...)
  *   Used to send a string across the reliable fd.
@@ -2017,7 +2034,8 @@ ClusterCom::sendReliableMessageReadTillClose(unsigned long addr, char *buf, int
     return false;
   }
   if (fcntl(fd, F_SETFD, 1) < 0) {
-    mgmt_elog(errno, "[ClusterCom::sendReliableMessageReadTillClose] Unable to set close-on-exec.\n");
+    mgmt_elog(errno, "[ClusterCom::sendReliableMessageReadTillClose] Unable to "
+                     "set close-on-exec.\n");
     ink_mutex_release(&mutex);
     close(fd);
     return false;
@@ -2036,7 +2054,9 @@ ClusterCom::sendReliableMessageReadTillClose(unsigned long addr, char *buf, int
     close_socket(fd);
     return false;
   } else {
-    Debug("ccom", "[ClusterCom::sendReliableMessageREadTillClose] Sent '%s' len: %d on fd: %d\n", buf, len, fd);
+    Debug("ccom", "[ClusterCom::sendReliableMessageREadTillClose] Sent '%s' "
+                  "len: %d on fd: %d\n",
+          buf, len, fd);
   }
 
   memset(tmp_reply, 0, 1024);
@@ -2061,7 +2081,6 @@ ClusterCom::sendReliableMessageReadTillClose(unsigned long addr, char *buf, int
   return true;
 } /* End ClusterCom::sendReliableMessageReadTillClose */
 
-
 /*
  * receiveIncomingMessage
  *   This function reads from the incoming channel. It is blocking,
@@ -2078,7 +2097,6 @@ ClusterCom::receiveIncomingMessage(char *buf, int max)
   return nbytes;
 } /* End ClusterCom::processIncomingMessages */
 
-
 /*
  * isMaster()
  *   Function checks known hosts and decides whether this local manager is
@@ -2112,7 +2130,6 @@ ClusterCom::isMaster()
   return false;
 } /* End ClusterCom::isMaster */
 
-
 /*
  * lowestPeer()
  *   Function finds the peer with the lowest number of current virtual
@@ -2148,7 +2165,6 @@ ClusterCom::lowestPeer(int *no)
   return min_ip;
 } /* End ClusterCom::lowestPeer */
 
-
 void
 ClusterCom::logClusterMismatch(const char *ip, ClusterMismatch type, char *data)
 {
@@ -2187,7 +2203,6 @@ ClusterCom::logClusterMismatch(const char *ip, ClusterMismatch type, char *data)
   ink_hash_table_insert(mismatchLog, ip, (void *)type);
 }
 
-
 /*
  * highestPeer()
  *   Function finds the peer with the highest number of current virtual
@@ -2224,7 +2239,6 @@ ClusterCom::highestPeer(int *no)
   return max_ip;
 } /* End ClusterCom::highestPeer */
 
-
 /*
  * checkBackDoor(...)
  *   Function checks for "backdoor" commands on the cluster port.
@@ -2369,7 +2383,6 @@ checkBackDoor(int req_fd, char *message)
                (int64_t)tmp->idle_ticks, tmp->last_time_recorded, tmp->delta, (int64_t)tmp->manager_idle_ticks, tmp->manager_alive);
       mgmt_writeline(req_fd, reply, strlen(reply));
 
-
       tmp_msg = "---------------------------\n";
       mgmt_writeline(req_fd, tmp_msg, strlen(tmp_msg));
     }
@@ -2397,7 +2410,8 @@ checkBackDoor(int req_fd, char *message)
     mgmt_writeline(req_fd, reply, strlen(reply));
 
 // XXX: Again multiple code caused by missing PID_T_FMT
-// TODO: Was #if defined(solaris) && (!defined(_FILE_OFFSET_BITS) || _FILE_OFFSET_BITS != 64)
+// TODO: Was #if defined(solaris) && (!defined(_FILE_OFFSET_BITS) ||
+// _FILE_OFFSET_BITS != 64)
 #if defined(solaris)
     snprintf(reply, sizeof(reply), "\twatched_process_fd: %d  watched_process_pid: %ld\n", lmgmt->watched_process_fd,
              (long int)lmgmt->watched_process_pid);
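
establishBroadcastChannel() and establishReceiveChannel() above wrap standard IPv4 multicast setup: a UDP socket, SO_REUSEADDR, a bind() to the cluster port, and an IP_ADD_MEMBERSHIP join on the multicast group. A stripped-down sketch of the receive side, with error handling reduced to closing the socket (open_mc_receiver() is a hypothetical name, not the code in this patch):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Open a UDP socket bound to the cluster port and joined to the group. */
    static int
    open_mc_receiver(const char *group, int port)
    {
      int one = 1;
      struct sockaddr_in addr;
      struct ip_mreq mc_request;

      int fd = socket(AF_INET, SOCK_DGRAM, 0);
      if (fd < 0) {
        return -1;
      }

      setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

      memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_ANY);
      addr.sin_port = htons(port);
      if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        close(fd);
        return -1;
      }

      mc_request.imr_multiaddr.s_addr = inet_addr(group);
      mc_request.imr_interface.s_addr = htonl(INADDR_ANY);
      if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mc_request, sizeof(mc_request)) < 0) {
        close(fd);
        return -1;
      }

      return fd; /* ready for recvfrom() of cluster packets */
    }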

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/cluster/ClusterCom.h
----------------------------------------------------------------------
diff --git a/mgmt/cluster/ClusterCom.h b/mgmt/cluster/ClusterCom.h
index 7687f2f..599f399 100644
--- a/mgmt/cluster/ClusterCom.h
+++ b/mgmt/cluster/ClusterCom.h
@@ -58,12 +58,17 @@ class FileManager;
 #define MAX_MC_GROUP_LEN 20
 #define MAX_NODE_SYSINFO_STRING 32
 
-#define CLUSTER_CONFIG_FILE_BLURB                                                                                                  \
-  "# Cluster Configuration file\n#\n# This file is machine generated and machine parsed.\n# Please do not change this file by "    \
-  "hand.\n#\n# This file designates the machines which make up the cluster\n# proper.  Data and load are distributed among these " \
-  "machines.\n#\n############################################################################\n# Number\n# IP:Port \n# "           \
-  "...\n############################################################################\n# Number = { 0, 1 ... } where 0 is a "       \
-  "stand-alone proxy\n# IP:Port = IP address: cluster accept port number\n#\n# Example 1: stand-alone proxy\n# 0\n#\n# Example "   \
+#define CLUSTER_CONFIG_FILE_BLURB                                              \
+  "# Cluster Configuration file\n#\n# This file is machine generated and "     \
+  "machine parsed.\n# Please do not change this file by "                      \
+  "hand.\n#\n# This file designates the machines which make up the "           \
+  "cluster\n# proper.  Data and load are distributed among these "             \
+  "machines.\n#\n############################################################" \
+  "################\n# Number\n# IP:Port \n# "                                 \
+  "...\n#####################################################################" \
+  "#######\n# Number = { 0, 1 ... } where 0 is a "                             \
+  "stand-alone proxy\n# IP:Port = IP address: cluster accept port "            \
+  "number\n#\n# Example 1: stand-alone proxy\n# 0\n#\n# Example "              \
   "2: 3 machines\n# 3\n# 127.1.2.3:83\n# 127.1.2.4:83\n# 127.1.2.5:83\n#\n"
 
 enum MgmtClusterType {
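For reference, the blurb's own "Example 2: 3 machines" corresponds to a generated cluster.config whose uncommented contents are just the node count followed by one IP:Port line per node (the addresses below are the ones quoted in the blurb and are illustrative only):

3
127.1.2.3:83
127.1.2.4:83
127.1.2.5:83

A stand-alone proxy (Example 1 in the blurb) reduces to a single line containing 0.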

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/cluster/VMap.cc
----------------------------------------------------------------------
diff --git a/mgmt/cluster/VMap.cc b/mgmt/cluster/VMap.cc
index 710a44f..2fa9b40 100644
--- a/mgmt/cluster/VMap.cc
+++ b/mgmt/cluster/VMap.cc
@@ -57,7 +57,6 @@ vmapEnableHandler(const char *tok, RecDataT /* data_type ATS_UNUSED */, RecData
   return 0;
 } /* End vmapEnableHandler */
 
-
 VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
 {
   bool found;
@@ -77,7 +76,6 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
   num_addrs = 0;
   num_nics = 0;
 
-
   this->interface = ats_strdup(interface);
   turning_off = false; // we are not turning off VIP
 
@@ -103,8 +101,9 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
     num_nics++;
     ink_hash_table_insert(interface_realip_map, interface, (void *)tmp_realip_info);
     if (enabled) {
-      mgmt_log("[VMap::Vmap] Added cluster interface '%s' real ip: '%s' to known interfaces\n", interface,
-               inet_ntoa(tmp_realip_info->real_ip));
+      mgmt_log("[VMap::Vmap] Added cluster interface '%s' real ip: '%s' to "
+               "known interfaces\n",
+               interface, inet_ntoa(tmp_realip_info->real_ip));
     }
   }
   {
@@ -152,7 +151,9 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
 
         // Get the address of the interface
         if (ioctl(tmp_socket, SIOCGIFADDR, (char *)ifr) < 0) {
-          mgmt_log("[VMap::VMap] Unable obtain address for network interface %s, presuming unused\n", ifr->ifr_name);
+          mgmt_log("[VMap::VMap] Unable obtain address for network interface "
+                   "%s, presuming unused\n",
+                   ifr->ifr_name);
         } else {
           InkHashTableValue hash_value;
 
@@ -177,8 +178,9 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
               ink_hash_table_insert(interface_realip_map, ifr->ifr_name, (void *)tmp_realip_info);
               num_nics++;
               if (enabled) {
-                mgmt_log("[VMap::Vmap] Added interface '%s' real ip: '%s' to known interfaces\n", ifr->ifr_name,
-                         inet_ntoa(tmp_realip_info->real_ip));
+                mgmt_log("[VMap::Vmap] Added interface '%s' real ip: '%s' to "
+                         "known interfaces\n",
+                         ifr->ifr_name, inet_ntoa(tmp_realip_info->real_ip));
               }
             }
           } else {
@@ -198,7 +200,6 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
     close(tmp_socket);
   }
 
-
   RecRegisterConfigUpdateCb("proxy.config.vmap.enabled", vmapEnableHandler, NULL);
 
   down_up_timeout = REC_readInteger("proxy.config.vmap.down_up_timeout", &found);
@@ -212,7 +213,6 @@ VMap::VMap(char *interface, unsigned long ip, ink_mutex *m)
 
 } /* End VMap::VMap */
 
-
 VMap::~VMap()
 {
   if (id_map)
@@ -225,7 +225,6 @@ VMap::~VMap()
   ats_free(addr_list);
 } /* End VMap::~VMap */
 
-
 /*
  * lt_runGambit()
  *   Function basically runs the virtual ip assignment gambit. If you are
@@ -242,7 +241,6 @@ VMap::lt_runGambit()
   bool init = false;
   struct in_addr virtual_addr, real_addr;
 
-
   if (!enabled) {
     return;
   }
@@ -282,13 +280,14 @@ VMap::lt_runGambit()
     }
   }
 
-
   for (i = 0; i < num_addrs; i++) { /* Check for conflicts with your interfaces */
     virtual_addr.s_addr = addr_list[i];
     ink_strlcpy(vaddr, inet_ntoa(virtual_addr), sizeof(vaddr));
 
     if ((conf_addr = rl_checkConflict(vaddr))) {
-      mgmt_log(stderr, "[VMap::lt_runGambit] Conflict w/addr: '%s' - Unable to use virtual address.\n", vaddr);
+      mgmt_log(stderr, "[VMap::lt_runGambit] Conflict w/addr: '%s' - Unable to "
+                       "use virtual address.\n",
+               vaddr);
       ats_free(conf_addr);
       break;
     }
@@ -298,7 +297,6 @@ VMap::lt_runGambit()
   return;
 } /* End VMap::lt_runGambit */
 
-
 /*
  * lt_readAListFile(...)
  *   Function reads in the virtual ip list, basically a parsing routine for the
@@ -315,7 +313,9 @@ VMap::lt_readAListFile(const char *fname)
   ats_scoped_str vaddr_path(RecConfigReadConfigPath(NULL, fname));
 
   if (!(fin = fopen(vaddr_path, "r"))) {
-    mgmt_log(stderr, "[VMap::lt_readAListFile] Unable to open file: %s, addr list unchanged\n", (const char *)vaddr_path);
+    mgmt_log(stderr, "[VMap::lt_readAListFile] Unable to open file: %s, addr "
+                     "list unchanged\n",
+             (const char *)vaddr_path);
     return;
   }
 
@@ -327,8 +327,10 @@ VMap::lt_readAListFile(const char *fname)
 
   id_map = ink_hash_table_create(InkHashTableKeyType_String);
   while (fgets(buf, 1024, fin)) {
-    // since each of the tmp_addr, tmp_interface, tmp_id has length 1024 which is not less than buf
-    // so here we don't need to worry about overflow, disable coverity check for this line
+    // each of tmp_addr, tmp_interface, and tmp_id has length 1024, which is
+    // not less than the size of buf, so there is no risk of overflow here;
+    // disable the coverity check for
+    // this line
     // coverity[secure_coding]
     if (buf[0] != '#' && isascii(buf[0]) && isdigit(buf[0]) && (sscanf(buf, "%s %s %s\n", tmp_addr, tmp_interface, tmp_id) == 3)) {
       tmp_num_addrs++;
@@ -352,8 +354,10 @@ VMap::lt_readAListFile(const char *fname)
     InkHashTableValue hash_value;
 
     /* Make sure we have a valid line and its not commented */
-    // since each of the tmp_addr, tmp_interface, tmp_id has length 1024 which is not less than buf
-    // so here we don't need to worry about overflow, disable coverity check for this line
+    // each of tmp_addr, tmp_interface, and tmp_id has length 1024, which is
+    // not less than the size of buf, so there is no risk of overflow here;
+    // disable the coverity check for
+    // this line
     // coverity[secure_coding]
     if (!isascii(buf[0]) || !isdigit(buf[0]) || (sscanf(buf, "%s %s %s\n", tmp_addr, tmp_interface, tmp_id) != 3)) {
       continue;
@@ -390,7 +394,6 @@ VMap::lt_readAListFile(const char *fname)
   return;
 } /* End VMap::lt_readAListFile */
 
-
 /*
  * rl_resetSeenFlag(...)
  *   Function resets the "seen" flag for a given peer's mapped addrs.
@@ -413,7 +416,6 @@ VMap::rl_resetSeenFlag(char *ip)
   return;
 } /* End VMap::rl_resetSeenFlag */
 
-
 /*
  * rl_clearUnSeen(...)
  *   This function is a sweeper function to clean up the map.
@@ -442,7 +444,6 @@ VMap::rl_clearUnSeen(char *ip)
   return numAddrs;
 } /* End VMap::rl_clearUnSeen */
 
-
 /*
  * rl_remote_map(...)
  *   Function sends the up interface command to a remote node.
@@ -464,7 +465,6 @@ VMap::rl_remote_map(char *virt_ip, char *real_ip)
   return true;
 } /* End VMap::rl_remote_map */
 
-
 /*
  * rl_remote_unmap(...)
  *   Function sends the up interface command to a remote node.
@@ -485,7 +485,6 @@ VMap::rl_remote_unmap(char *virt_ip, char *real_ip)
   return true;
 } /* End VMap::rl_remote_unmap */
 
-
 /*
  * rl_map(...)
  *   Function maps a virt_ip to a real_ip, if real_ip is NULL it maps it
@@ -517,14 +516,15 @@ VMap::rl_map(char *virt_ip, char *real_ip)
   *entry = true;
 
   if (!real_ip) {
-    mgmt_elog(0, "[VMap::rl_map] no real ip associated with virtual ip %s, mapping to local\n", buf);
+    mgmt_elog(0, "[VMap::rl_map] no real ip associated with virtual ip %s, "
+                 "mapping to local\n",
+              buf);
     last_map_change = time(NULL);
   }
   ink_hash_table_insert(tmp, buf, (void *)entry);
   return true;
 } /* End VMap::rl_map */
 
-
 bool
 VMap::rl_unmap(char *virt_ip, char *real_ip)
 {
@@ -552,7 +552,6 @@ VMap::rl_unmap(char *virt_ip, char *real_ip)
   return true;
 } /* End VMap::rl_unmap */
 
-
 /*
  * rl_checkConflict(...)
  *   This function checks for virt conflicts between the local node and
@@ -598,7 +597,6 @@ VMap::rl_checkConflict(char *virt_ip)
   return NULL;
 } /* End VMap::rl_checkConflict */
 
-
 /*
  * checkGlobConflict(...)
  *   This function checks for conflict in the local map as well as the
@@ -696,7 +694,6 @@ VMap::rl_remap(char *virt_ip, char *cur_ip, char *dest_ip, int cur_naddr, int de
   return true;
 } /* End VMap::rl_remap */
 
-
 /*
  * boundAddr(...)
  *   Function tests whether or not the addr is bound. Returns 0(not bound),
@@ -724,7 +721,6 @@ VMap::rl_boundAddr(char *virt_ip)
   return 0;
 } /* End VMap::rl_boundAddr */
 
-
 /*
  * boundTo(...)
  *   Function returns ip addr(string form) of the node that the virt address
@@ -759,7 +755,6 @@ VMap::rl_boundTo(char *virt_ip)
   return 0;
 } /* End VMap::rl_boundTo */
 
-
 /*
  * constructVMapMessage(...)
  *   Constructs the broadcast message of the local nodes virtual ip map.
@@ -844,7 +839,6 @@ VMap::downAddrs()
   return;
 } /* End VMap::downAddrs */
 
-
 void
 VMap::downOurAddrs()
 {
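Stepping back from the hunks in this file: lt_readAListFile() above parses the virtual address list one line at a time with sscanf("%s %s %s"), skipping comments and any line that does not begin with a digit, and stores the three fields as address, interface, and sub-interface id (cf. VIPInfo in VMap.h below). A plausible input file therefore looks like the following sketch; the concrete values are made up for illustration:

# <virtual ip> <interface> <sub-interface id>, one mapping per line
127.0.46.1 eth0 1
127.0.46.2 eth0 2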

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/cluster/VMap.h
----------------------------------------------------------------------
diff --git a/mgmt/cluster/VMap.h b/mgmt/cluster/VMap.h
index 2959ab8..a270284 100644
--- a/mgmt/cluster/VMap.h
+++ b/mgmt/cluster/VMap.h
@@ -29,19 +29,16 @@
 #define MAX_INTERFACE 16
 #define MAX_SUB_ID 8
 
-
 typedef struct _vip_info {
   char interface[MAX_INTERFACE];
   char sub_interface_id[MAX_SUB_ID];
 } VIPInfo;
 
-
 typedef struct _realip_info {
   struct in_addr real_ip;
   bool mappings_for_interface;
 } RealIPInfo;
 
-
 /*
  * class VMap
  *   Class implements the protocol and support functions for mapping the
@@ -108,7 +105,8 @@ public:
   ink_mutex *mutex;
   // Map of virtual ip addresses assigned to the local node
   InkHashTable *our_map;
-  // Map of virtual ip addresses assigned to other nodes; as indicated through multicast messages; used
+  // Map of virtual ip addresses assigned to other nodes; as indicated through
+  // multicast messages; used
   // to detect conflicts
   InkHashTable *ext_map;
   InkHashTable *id_map;

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/utils/MgmtHashTable.h
----------------------------------------------------------------------
diff --git a/mgmt/utils/MgmtHashTable.h b/mgmt/utils/MgmtHashTable.h
index 1dbc12b..86a946a 100644
--- a/mgmt/utils/MgmtHashTable.h
+++ b/mgmt/utils/MgmtHashTable.h
@@ -189,5 +189,4 @@ private:
 
 }; /* End class MgmtHashTable */
 
-
 #endif /* _MGMT_HASH_TABLE_H */

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/utils/MgmtMarshall.h
----------------------------------------------------------------------
diff --git a/mgmt/utils/MgmtMarshall.h b/mgmt/utils/MgmtMarshall.h
index 7e0f752..ea6784f 100644
--- a/mgmt/utils/MgmtMarshall.h
+++ b/mgmt/utils/MgmtMarshall.h
@@ -31,19 +31,23 @@
 // Simple message marshalling.
 //
 // MGMT_MARSHALL_INT
-// Wire size is 4 bytes signed. This type is used for enum and boolean values, as well as embedded lengths and general
+// Wire size is 4 bytes signed. This type is used for enum and boolean values,
+// as well as embedded lengths and general
 // integer values.
 //
 // MGMT_MARSHALL_LONG
 // Wire size is 8 bytes signed.
 //
 // MGMT_MARSHALL_STRING
-// Wire size is a 4 byte length followed by N bytes. The trailing NUL is always sent and NULL strings are sent as empty
-// strings. This means that the minimum wire size for a string is 5 bytes (4 byte length + NUL byte). The unmarshalled
+// Wire size is a 4 byte length followed by N bytes. The trailing NUL is always
+// sent and NULL strings are sent as empty
+// strings. This means that the minimum wire size for a string is 5 bytes (4
+// byte length + NUL byte). The unmarshalled
 // string pointer is guaranteed to be non-NULL.
 //
 // MGMT_MARSHALL_DATA
-// Wire size is 4 byte length followed by N data bytes. If the length is 0, no subsequent bytes are sent. In this case
+// Wire size is 4 byte length followed by N data bytes. If the length is 0, no
+// subsequent bytes are sent. In this case
 // the unmarshalled data pointer is guaranteed to be NULL.
 //
 enum MgmtMarshallType {
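To make the reflowed comment block above concrete: a MGMT_MARSHALL_STRING field is a 4-byte length followed by the string bytes including the trailing NUL, and a NULL string goes out as an empty string. The sketch below is illustrative only, not the ATS marshalling code; the function name and the use of host byte order for the length prefix are assumptions made for the example.

#include <cstdint>
#include <cstring>
#include <vector>

// Sketch of the MGMT_MARSHALL_STRING wire layout described above.
static std::vector<uint8_t>
sketch_marshall_string(const char *str)
{
  if (str == nullptr) {
    str = ""; // NULL strings are sent as empty strings
  }

  int32_t len = static_cast<int32_t>(std::strlen(str)) + 1; // trailing NUL is always sent
  std::vector<uint8_t> wire(sizeof(len) + len);

  std::memcpy(wire.data(), &len, sizeof(len));      // 4-byte length prefix (host order assumed)
  std::memcpy(wire.data() + sizeof(len), str, len); // string bytes plus the NUL

  return wire;
}

Both sketch_marshall_string(NULL) and sketch_marshall_string("") produce the 5-byte minimum the comment mentions: a length of 1 followed by a single NUL byte.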

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/utils/MgmtUtils.cc
----------------------------------------------------------------------
diff --git a/mgmt/utils/MgmtUtils.cc b/mgmt/utils/MgmtUtils.cc
index 0d9bb26..f4d1834 100644
--- a/mgmt/utils/MgmtUtils.cc
+++ b/mgmt/utils/MgmtUtils.cc
@@ -476,7 +476,8 @@ mgmt_getAddrForIntr(char *intrName, sockaddr *addr, int *mtu)
     ifc.ifc_buf = ifbuf;
     if (ioctl(fakeSocket, SIOCGIFCONF, &ifc) < 0) {
       if (errno != EINVAL || lastlen != 0) {
-        mgmt_fatal(stderr, errno, "[getAddrForIntr] Unable to read network interface configuration\n");
+        mgmt_fatal(stderr, errno, "[getAddrForIntr] Unable to read network "
+                                  "interface configuration\n");
       }
     } else {
       if (ifc.ifc_len == lastlen) {

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/mgmt/utils/test_marshall.cc
----------------------------------------------------------------------
diff --git a/mgmt/utils/test_marshall.cc b/mgmt/utils/test_marshall.cc
index 6cc1847..aac7968 100644
--- a/mgmt/utils/test_marshall.cc
+++ b/mgmt/utils/test_marshall.cc
@@ -164,7 +164,8 @@ REGRESSION_TEST(MessageReadWriteA)(RegressionTest *t, int /* atype ATS_UNUSED */
   mlong = (MgmtMarshallLong)(&listenfd);
 
   // Check invalid Fd write. ToDo: Commented out, see TS-3052.
-  // CHECK_EQ(mgmt_message_write(FD_SETSIZE - 1, ifields, countof(ifields), &mint, &mlong), -1);
+  // CHECK_EQ(mgmt_message_write(FD_SETSIZE - 1, ifields, countof(ifields),
+  // &mint, &mlong), -1);
 
   CHECK_EQ(mgmt_message_write(clientfd, ifields, countof(ifields), &mint, &mlong), 12);
 
@@ -305,7 +306,8 @@ REGRESSION_TEST(MessageLength)(RegressionTest *t, int /* atype ATS_UNUSED */, in
   mstring = (char *)"";
   CHECK_EQ(mgmt_message_length(sfields, countof(sfields), &mstring), 4 + 1);
 
-  // data fields include a 4-byte length. We don't go looking at the data in this case.
+  // data fields include a 4-byte length. We don't go looking at the data in
+  // this case.
   mdata.len = 99;
   mdata.ptr = NULL;
   CHECK_EQ(mgmt_message_length(dfields, countof(dfields), &mdata), 99 + 4);
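A note on the expected value in the last assertion above: the data field has a declared length of 99 and a NULL pointer, and per the MgmtMarshall.h comment a data field is wired as a 4-byte length prefix followed by the payload bytes, so the expected size is 4 + 99 = 103 without ever dereferencing the pointer.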

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ControlMatcher.cc
----------------------------------------------------------------------
diff --git a/proxy/ControlMatcher.cc b/proxy/ControlMatcher.cc
index b2e0748..0897461 100644
--- a/proxy/ControlMatcher.cc
+++ b/proxy/ControlMatcher.cc
@@ -430,7 +430,8 @@ RegexMatcher<Data, Result>::AllocateSpace(int num_entries)
 }
 
 //
-// config_parse_error RegexMatcher<Data,Result>::NewEntry(matcher_line* line_info)
+// config_parse_error RegexMatcher<Data,Result>::NewEntry(matcher_line*
+// line_info)
 //
 template <class Data, class Result>
 config_parse_error
@@ -663,7 +664,8 @@ IpMatcher<Data, Result>::NewEntry(matcher_line *line_info)
 }
 
 //
-// void IpMatcherData,Result>::Match(in_addr_t addr, RequestData* rdata, Result* result)
+// void IpMatcher<Data, Result>::Match(in_addr_t addr, RequestData* rdata,
+// Result* result)
 //
 template <class Data, class Result>
 void
@@ -677,7 +679,6 @@ IpMatcher<Data, Result>::Match(sockaddr const *addr, RequestData *rdata, Result
   }
 }
 
-
 template <class Data, class Result>
 void
 IpMatcher<Data, Result>::Print()
@@ -757,7 +758,6 @@ ControlMatcher<Data, Result>::Print()
   }
 }
 
-
 // void ControlMatcher<Data, Result>::Match(RequestData* rdata
 //                                          Result* result)
 //
@@ -970,7 +970,6 @@ ControlMatcher<Data, Result>::BuildTable()
   return ret;
 }
 
-
 /****************************************************************
  *    TEMPLATE INSTANTIATIONS GO HERE
  *

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/ControlMatcher.h
----------------------------------------------------------------------
diff --git a/proxy/ControlMatcher.h b/proxy/ControlMatcher.h
index 999d09f..ee62c7f 100644
--- a/proxy/ControlMatcher.h
+++ b/proxy/ControlMatcher.h
@@ -36,7 +36,8 @@
  *  configuration file and builds the lookup table
  *
 *     Four types of matches are supported: hostname, domain name, ip address
- *  and URL regex.  For these four types, three lookup tables are used.  Regex and
+ *  and URL regex.  For these four types, three lookup tables are used.  Regex
+ *  and
 *  ip lookups have their own tables and host and domain lookups share a single
  *  table
  *
@@ -163,7 +164,6 @@ public:
   bool internal_txn;
 };
 
-
 template <class Data, class Result> class UrlMatcher
 {
 public:
@@ -196,7 +196,6 @@ protected:
   const char *file_name;    // Used for Debug/Warning/Error messages
 };
 
-
 template <class Data, class Result> class RegexMatcher
 {
 public:
@@ -301,7 +300,6 @@ public:
   const char *file_name;    // Used for Debug/Warning/Error messages
 };
 
-
 #define ALLOW_HOST_TABLE 1 << 0
 #define ALLOW_IP_TABLE 1 << 1
 #define ALLOW_REGEX_TABLE 1 << 2
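As a reading aid for the comment block at the top of this header: hostname and domain rules share one lookup table, while ip and regex rules each get a table of their own. The sketch below only restates that split; the enum and function are hypothetical and not part of the ATS sources.

// Hypothetical illustration of the three-table split described above.
enum class RuleKind { Hostname, Domain, IpAddress, UrlRegex };

static const char *
table_for(RuleKind kind)
{
  switch (kind) {
  case RuleKind::Hostname:
  case RuleKind::Domain:
    return "host/domain table"; // hostname and domain rules share a table
  case RuleKind::IpAddress:
    return "ip table";
  case RuleKind::UrlRegex:
    return "regex table";
  }
  return "unknown";
}

The ALLOW_HOST_TABLE / ALLOW_IP_TABLE / ALLOW_REGEX_TABLE bits above are presumably OR-ed together to restrict which of these tables a particular configuration file may populate.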

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/CoreUtils.cc
----------------------------------------------------------------------
diff --git a/proxy/CoreUtils.cc b/proxy/CoreUtils.cc
index 32c3bff..120dba0 100644
--- a/proxy/CoreUtils.cc
+++ b/proxy/CoreUtils.cc
@@ -28,7 +28,6 @@
    Description:  Automated processing of core files on Linux
  ****************************************************************************/
 
-
 /*
    Stack Unwinding procedure on ix86 architecture on Linux :
    Get the first frame pointer in $ebp.
@@ -77,8 +76,10 @@
  *        +-----------------+     +-----------------+
  */
 
-/* 32-bit arguments are pushed down stack in reverse syntactic order (hence accessed/popped in the right order), above the 32-bit
- * near return address. %ebp, %esi, %edi, %ebx are callee-saved, other registers are caller-saved; %eax is to hold the result, or
+/* 32-bit arguments are pushed down stack in reverse syntactic order (hence
+ * accessed/popped in the right order), above the 32-bit
+ * near return address. %ebp, %esi, %edi, %ebx are callee-saved, other registers
+ * are caller-saved; %eax is to hold the result, or
  * %edx:%eax for 64-bit results */
 
 /*    does -fomit-frame-pointer have any repercussions??
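The unwinding notes above boil down to the classic %ebp chain: each frame stores the caller's saved %ebp at 0(%ebp) and the return address at 4(%ebp). The sketch below walks such a chain over live memory for simplicity, assuming a 32-bit (ix86) process; CoreUtils itself reads each word out of the core image instead (see read_from_core later in this file), and the function name and sanity checks here are illustrative only.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Walk a saved-%ebp frame chain: previous frame pointer at 0(%ebp),
// return address at 4(%ebp).  Illustrative sketch, not the CoreUtils code.
static void
sketch_walk_frames(const uint32_t *ebp, int max_frames)
{
  for (int i = 0; i < max_frames && ebp != nullptr; ++i) {
    uint32_t saved_ebp = ebp[0]; // caller's frame pointer
    uint32_t ret_addr  = ebp[1]; // return address into the caller
    std::printf("frame %d: ebp=%p return=0x%08" PRIx32 "\n", i, (const void *)ebp, ret_addr);
    if (saved_ebp == 0 || saved_ebp <= reinterpret_cast<uintptr_t>(ebp)) {
      break; // end of chain, or a frame pointer that does not move up the stack
    }
    ebp = reinterpret_cast<const uint32_t *>(static_cast<uintptr_t>(saved_ebp));
  }
}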
@@ -128,7 +129,6 @@ char netvc_ptr_str[256] = "";
 HdrHeap *swizzle_heap;
 char *ptr_data;
 
-
 // returns the index of the vaddr or the index after where it should be
 intptr_t
 CoreUtils::find_vaddr(intptr_t vaddr, intptr_t upper, intptr_t lower)
@@ -208,7 +208,6 @@ CoreUtils::insert_table(intptr_t vaddr1, intptr_t offset1, intptr_t fsize1)
   }
 }
 
-
 // returns -1 on failure otherwise fills the buffer and
 // returns the number of bytes read
 intptr_t
@@ -244,7 +243,6 @@ CoreUtils::read_from_core(intptr_t vaddr, intptr_t bytes, char *buf)
   return -1;
 }
 
-
 /* Linux Specific functions */
 
 #if defined(linux)
@@ -362,7 +360,6 @@ CoreUtils::find_stuff(StuffTest_f f)
 }
 #endif // linux check
 
-
 // test whether a given register is an HttpSM
 //   if it is, call process_HttpSM on it
 void
@@ -477,7 +474,6 @@ CoreUtils::process_HttpSM(HttpSM *core_ptr)
     printf("process_HttpSM : last_seen_http_sm == core_ptr\n");
 }
 
-
 void
 CoreUtils::print_http_hdr(HTTPHdr *h, const char *name)
 {
@@ -792,7 +788,6 @@ CoreUtils::process_NetVC(UnixNetVConnection *nvc_test)
   ats_free(buf);
 }
 
-
 char *
 CoreUtils::load_string(const char *addr)
 {