Posted to commits@trafficserver.apache.org by am...@apache.org on 2015/07/19 16:14:42 UTC

[2/8] trafficserver git commit: TS-974: Partial Object Caching.

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpCacheSM.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpCacheSM.h b/proxy/http/HttpCacheSM.h
index 3c14da3..0dbc2ae 100644
--- a/proxy/http/HttpCacheSM.h
+++ b/proxy/http/HttpCacheSM.h
@@ -70,8 +70,12 @@ public:
 
   Action *open_read(const HttpCacheKey *key, URL *url, HTTPHdr *hdr, CacheLookupHttpConfig *params, time_t pin_in_cache);
 
-  Action *open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache, bool retry,
-                     bool allow_multiple);
+  /** Open a cache read VC for the same object as the writer.
+      @return The action for the pending open of a reader created from the writer.
+  */
+  Action *open_partial_read(HTTPHdr *client_request_hdr);
+
+  Action *open_write(const HttpCacheKey *key, URL *url, HTTPHdr *request, CacheHTTPInfo *old_info, time_t pin_in_cache, bool retry, bool allow_multiple);
 
   CacheVConnection *cache_read_vc;
   CacheVConnection *cache_write_vc;
@@ -149,6 +153,7 @@ private:
 
   int state_cache_open_read(int event, void *data);
   int state_cache_open_write(int event, void *data);
+  int state_cache_open_partial_read(int evid, void *data);
 
   HttpCacheAction captive_action;
   bool open_read_cb;

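The new open_partial_read() / state_cache_open_partial_read pair exists so the state machine can serve a client from a cache reader while a concurrent cache writer is still filling the same object. As a standalone illustration of that read-while-write constraint (none of the names below are ATS types; this is only a sketch of the availability check, not the committed code):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Illustrative only: how much of a requested byte range can be served right
    // now, given that a concurrent writer has committed 'bytes_written' bytes.
    struct ByteRange {
      int64_t start; // first byte offset, inclusive
      int64_t end;   // last byte offset, inclusive
    };

    static int64_t
    available_now(ByteRange req, int64_t bytes_written)
    {
      if (req.start >= bytes_written)
        return 0; // nothing in the requested span is stored yet
      return std::min(req.end + 1, bytes_written) - req.start;
    }

    int
    main()
    {
      ByteRange req = {1000, 4999};                                 // "Range: bytes=1000-4999"
      std::printf("%lld\n", (long long)available_now(req, 3000LL)); // 2000 bytes servable now
      std::printf("%lld\n", (long long)available_now(req, 500LL));  // 0 - the reader must wait for the writer
      return 0;
    }
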
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpDebugNames.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpDebugNames.cc b/proxy/http/HttpDebugNames.cc
index 7db357d..ea293df 100644
--- a/proxy/http/HttpDebugNames.cc
+++ b/proxy/http/HttpDebugNames.cc
@@ -386,6 +386,8 @@ HttpDebugNames::get_action_name(HttpTransact::StateMachineAction_t e)
     return ("SM_ACTION_API_POST_REMAP");
   case HttpTransact::SM_ACTION_POST_REMAP_SKIP:
     return ("SM_ACTION_POST_REMAP_SKIP");
+  case HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ:
+    return "SM_ACTION_CACHE_OPEN_PARTIAL_READ";
   }
 
   return ("unknown state name");

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpSM.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
index 0e37f22..6d270f9 100644
--- a/proxy/http/HttpSM.cc
+++ b/proxy/http/HttpSM.cc
@@ -253,10 +253,11 @@ HttpVCTable::cleanup_all()
     DebugSM("http", "[%" PRId64 "] [%s, %s]", sm_id, #state_name, HttpDebugNames::get_event_name(event)); \
   }
 
-#define HTTP_SM_SET_DEFAULT_HANDLER(_h) \
-  {                                     \
-    REMEMBER(-1, reentrancy_count);     \
-    default_handler = _h;               \
+#define HTTP_SM_SET_DEFAULT_HANDLER(_h)                                          \
+  {                                                                              \
+    REMEMBER(-1, reentrancy_count);                                              \
+    Debug("amc", "SM %" PRId64 " default handler = %s", sm_id, handlerName(_h)); \
+    default_handler = _h;                                                        \
   }
 
 
@@ -363,7 +364,6 @@ HttpSM::init()
   t_state.force_dns = (ip_rule_in_CacheControlTable() || t_state.parent_params->ParentTable->ipMatch ||
                        !(t_state.txn_conf->doc_in_cache_skip_dns) || !(t_state.txn_conf->cache_http));
 
-  http_parser.m_allow_non_http = t_state.http_config_param->parser_allow_non_http;
   http_parser_init(&http_parser);
 
   SET_HANDLER(&HttpSM::main_handler);
@@ -617,39 +617,26 @@ HttpSM::state_read_client_request_header(int event, void *data)
   // We need to handle EOS as well as READ_READY because the client
  // may have sent all of the data already followed by a FIN and that
   // should be OK.
-  if (is_transparent_passthrough_allowed() && ua_raw_buffer_reader != NULL) {
-    bool do_blind_tunnel = false;
-    // If we had a parse error and we're done reading data
-    // blind tunnel
-    if ((event == VC_EVENT_READ_READY || event == VC_EVENT_EOS) && state == PARSE_ERROR) {
-      do_blind_tunnel = true;
-
-      // If we had a GET request that has data after the
-      // get request, do blind tunnel
-    } else if (state == PARSE_DONE && t_state.hdr_info.client_request.method_get_wksidx() == HTTP_WKSIDX_GET &&
-               ua_raw_buffer_reader->read_avail() > 0 && !t_state.hdr_info.client_request.is_keep_alive_set()) {
-      do_blind_tunnel = true;
-    }
-    if (do_blind_tunnel) {
-      DebugSM("http", "[%" PRId64 "] first request on connection failed parsing, switching to passthrough.", sm_id);
-
-      t_state.transparent_passthrough = true;
-      http_parser_clear(&http_parser);
-
-      // Turn off read eventing until we get the
-      // blind tunnel infrastructure set up
-      ua_session->get_netvc()->do_io_read(this, 0, NULL);
-
-      /* establish blind tunnel */
-      setup_blind_tunnel_port();
+  if ((event == VC_EVENT_READ_READY || event == VC_EVENT_EOS) && state == PARSE_ERROR && is_transparent_passthrough_allowed() &&
+      ua_raw_buffer_reader != NULL) {
+    DebugSM("http", "[%" PRId64 "] first request on connection failed parsing, switching to passthrough.", sm_id);
 
-      // Setting half close means we will send the FIN when we've written all of the data.
-      if (event == VC_EVENT_EOS) {
-        this->set_ua_half_close_flag();
-        t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
-      }
-      return 0;
+    t_state.transparent_passthrough = true;
+    http_parser_clear(&http_parser);
+
+    // Turn off read eventing until we get the
+    // blind tunnel infrastructure set up
+    ua_session->get_netvc()->do_io_read(this, 0, NULL);
+
+    /* establish blind tunnel */
+    setup_blind_tunnel_port();
+
+    // Setting half close means we will send the FIN when we've written all of the data.
+    if (event == VC_EVENT_EOS) {
+      this->set_ua_half_close_flag();
+      t_state.client_info.keep_alive = HTTP_NO_KEEPALIVE;
     }
+    return 0;
   }
 
   // Check to see if we are done parsing the header
@@ -1450,6 +1437,8 @@ HttpSM::state_api_callout(int event, void *data)
 void
 HttpSM::handle_api_return()
 {
+  HttpTunnelProducer *p = 0; // used as a scratch var in various cases.
+
   switch (t_state.api_next_action) {
   case HttpTransact::SM_ACTION_API_SM_START:
     if (t_state.client_info.port_attribute == HttpProxyPort::TRANSPORT_BLIND_TUNNEL) {
@@ -1496,12 +1485,11 @@ HttpSM::handle_api_return()
   }
 
   switch (t_state.next_action) {
-  case HttpTransact::SM_ACTION_TRANSFORM_READ: {
-    HttpTunnelProducer *p = setup_transfer_from_transform();
+  case HttpTransact::SM_ACTION_TRANSFORM_READ:
+    p = setup_transfer_from_transform();
     perform_transform_cache_write_action();
     tunnel.tunnel_run(p);
     break;
-  }
   case HttpTransact::SM_ACTION_SERVER_READ: {
     if (unlikely(t_state.did_upgrade_succeed)) {
      // We've successfully handled the upgrade, let's now set up
@@ -1512,14 +1500,30 @@ HttpSM::handle_api_return()
 
       setup_blind_tunnel(true);
     } else {
-      HttpTunnelProducer *p = setup_server_transfer();
-      perform_cache_write_action();
-      tunnel.tunnel_run(p);
+      if ((t_state.range_setup == HttpTransact::RANGE_PARTIAL_WRITE || t_state.range_setup == HttpTransact::RANGE_PARTIAL_UPDATE) &&
+          HttpTransact::CACHE_DO_WRITE == t_state.cache_info.action) {
+        Debug("amc", "Set up for partial read");
+        CacheVConnection *save_write_vc = cache_sm.cache_write_vc;
+        tunnel.tunnel_run(setup_server_transfer_to_cache_only());
+        t_state.next_action = HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ;
+        t_state.source = HttpTransact::SOURCE_CACHE;
+        HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_partial_read);
+        cache_sm.cache_write_vc = save_write_vc;
+        // Close the read VC if it's there because it's less work than trying to reset the existing
+        // one (which doesn't have the ODE attached).
+        cache_sm.close_read();
+        pending_action = cache_sm.open_partial_read(&t_state.hdr_info.client_request);
+        cache_sm.cache_write_vc = NULL;
+      } else {
+        p = setup_server_transfer();
+        perform_cache_write_action();
+        tunnel.tunnel_run(p);
+      }
     }
     break;
   }
   case HttpTransact::SM_ACTION_SERVE_FROM_CACHE: {
-    HttpTunnelProducer *p = setup_cache_read_transfer();
+    p = setup_cache_read_transfer();
     tunnel.tunnel_run(p);
     break;
   }
@@ -2355,7 +2359,6 @@ HttpSM::state_cache_open_write(int event, void *data)
     // The write vector was locked and the cache_sm retried
     // and got the read vector again.
     cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
-    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
     if (cache_sm.cache_read_vc->is_ram_cache_hit()) {
       t_state.cache_info.hit_miss_code = SQUID_HIT_RAM;
     } else {
@@ -2443,7 +2446,7 @@ HttpSM::state_cache_open_read(int event, void *data)
     t_state.source = HttpTransact::SOURCE_CACHE;
 
     cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
-    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
+    // ToDo: Should support other levels of cache hits here, but the cache does not support it (yet)
     if (cache_sm.cache_read_vc->is_ram_cache_hit()) {
       t_state.cache_info.hit_miss_code = SQUID_HIT_RAM;
     } else {
@@ -2481,6 +2484,62 @@ HttpSM::state_cache_open_read(int event, void *data)
   return 0;
 }
 
+//////////////////////////////////////////////////////////////////////////
+//
+//  HttpSM::state_cache_open_partial_read()
+//
+//  Handle the case where a partial request had a cache miss and we sent
+//  a request to the origin which has now come back successfully. We
+//  need to create a reader cache VC to handle the read side of the
+//  operation.
+//////////////////////////////////////////////////////////////////////////
+int
+HttpSM::state_cache_open_partial_read(int event, void *data)
+{
+  STATE_ENTER(&HttpSM::state_cache_open_partial_read, event);
+
+  //  ink_assert(NULL != cache_sm.cache_write_vc);
+  Debug("amc", "Handling partial read event");
+
+  switch (event) {
+  case CACHE_EVENT_OPEN_READ:
+    pending_action = NULL;
+
+    DebugSM("http", "[%" PRId64 "] cache_open_partial_read - CACHE_EVENT_OPEN_READ", sm_id);
+
+    ink_assert(cache_sm.cache_read_vc != NULL);
+
+    cache_sm.cache_read_vc->get_http_info(&t_state.cache_info.object_read);
+    ink_assert(t_state.cache_info.object_read != 0);
+    cache_sm.cache_read_vc->set_content_range(t_state.hdr_info.request_range);
+
+    t_state.next_action = HttpTransact::SM_ACTION_SERVE_FROM_CACHE;
+    t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR;
+
+    do_api_callout();
+    break;
+  case CACHE_EVENT_OPEN_READ_FAILED:
+    pending_action = NULL;
+
+    DebugSM("http", "[%" PRId64 "] cache_open_partial_read - "
+                    "CACHE_EVENT_OPEN_READ_FAILED",
+            sm_id);
+
+    // Need to do more here - mainly fall back to serving directly from the origin.
+    // Although we've got a serious problem if the open fails in this situation.
+    ink_assert(!"[amc] do something!");
+    break;
+
+
+  default:
+    // When the SM is in this state a tunnel is already running, so any event that is not
+    // of interest to this state is passed on to the tunnel handler.
+    return this->tunnel_handler(event, data);
+  }
+
+  return 0;
+}
+
 int
 HttpSM::main_handler(int event, void *data)
 {
@@ -2747,6 +2806,9 @@ HttpSM::tunnel_handler(int event, void *data)
 {
   STATE_ENTER(&HttpSM::tunnel_handler, event);
 
+  if (CACHE_EVENT_OPEN_READ == event)
+    return 0;
+
   ink_assert(event == HTTP_TUNNEL_EVENT_DONE);
   ink_assert(data == &tunnel);
   // The tunnel calls this when it is done
@@ -2960,7 +3022,6 @@ HttpSM::tunnel_handler_server(int event, HttpTunnelProducer *p)
       ua_session->attach_server_session(server_session);
     } else {
       // Release the session back into the shared session pool
-      server_session->get_netvc()->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->keep_alive_no_activity_timeout_out));
       server_session->release();
     }
   }
@@ -4062,6 +4123,10 @@ HttpSM::do_hostdb_update_if_necessary()
 void
 HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
 {
+  (void)field;
+  (void)content_length;
+  return;
+#if 0
   int prev_good_range = -1;
   const char *value;
   int value_len;
@@ -4109,7 +4174,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
   t_state.range_in_cache = true;
 
   for (; value; value = csv.get_next(&value_len)) {
-    if (!(tmp = (const char *)memchr(value, '-', value_len))) {
+    if (!(tmp = (const char *) memchr(value, '-', value_len))) {
       t_state.range_setup = HttpTransact::RANGE_NONE;
       goto Lfaild;
     }
@@ -4118,8 +4183,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     s = value;
     e = tmp;
     // skip leading white spaces
-    for (; s < e && ParseRules::is_ws(*s); ++s)
-      ;
+    for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
     if (s >= e)
       start = -1;
@@ -4127,8 +4191,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
       for (start = 0; s < e && *s >= '0' && *s <= '9'; ++s)
         start = start * 10 + (*s - '0');
       // skip last white spaces
-      for (; s < e && ParseRules::is_ws(*s); ++s)
-        ;
+      for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
       if (s < e || start < 0) {
         t_state.range_setup = HttpTransact::RANGE_NONE;
@@ -4140,8 +4203,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     s = tmp + 1;
     e = value + value_len;
     // skip leading white spaces
-    for (; s < e && ParseRules::is_ws(*s); ++s)
-      ;
+    for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
     if (s >= e) {
       if (start < 0) {
@@ -4156,8 +4218,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
       for (end = 0; s < e && *s >= '0' && *s <= '9'; ++s)
         end = end * 10 + (*s - '0');
       // skip last white spaces
-      for (; s < e && ParseRules::is_ws(*s); ++s)
-        ;
+      for (; s < e && ParseRules::is_ws(*s); ++s) ;
 
       if (s < e || end < 0) {
         t_state.range_setup = HttpTransact::RANGE_NONE;
@@ -4195,9 +4256,10 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
     ranges[nr]._end = end;
     ++nr;
 
-    if (!cache_sm.cache_read_vc->is_pread_capable() && cache_config_read_while_writer == 2) {
+#if 0
+    if (!cache_sm.cache_read_vc->is_pread_capable() && cache_config_read_while_writer==2) {
       // write in progress, check if request range not in cache yet
-      HTTPInfo::FragOffset *frag_offset_tbl = t_state.cache_info.object_read->get_frag_table();
+      HTTPInfo::FragOffset* frag_offset_tbl = t_state.cache_info.object_read->get_frag_table();
       int frag_offset_cnt = t_state.cache_info.object_read->get_frag_offset_count();
 
       if (!frag_offset_tbl || !frag_offset_cnt || (frag_offset_tbl[frag_offset_cnt - 1] < (uint64_t)end)) {
@@ -4205,6 +4267,7 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
         t_state.range_in_cache = false;
       }
     }
+#endif
   }
 
   if (nr > 0) {
@@ -4220,16 +4283,22 @@ HttpSM::parse_range_and_compare(MIMEField *field, int64_t content_length)
 Lfaild:
   t_state.range_in_cache = false;
   t_state.num_range_fields = -1;
-  delete[] ranges;
+  delete []ranges;
   return;
+#endif
 }
 
 void
-HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
+HttpSM::calculate_output_cl(int64_t content_length, int64_t num_chars)
 {
+#if 1
+  (void)content_length;
+  (void)num_chars;
+  return;
+#else
   int i;
 
-  if (t_state.range_setup != HttpTransact::RANGE_REQUESTED && t_state.range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
+  if (t_state.range_setup != HttpTransact::RANGE_REQUESTED)
     return;
 
   ink_assert(t_state.ranges);
@@ -4240,9 +4309,9 @@ HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
     for (i = 0; i < t_state.num_range_fields; i++) {
       if (t_state.ranges[i]._start >= 0) {
         t_state.range_output_cl += boundary_size;
-        t_state.range_output_cl += sub_header_size + num_chars_for_ct;
+        t_state.range_output_cl += sub_header_size + content_length;
         t_state.range_output_cl +=
-          num_chars_for_int(t_state.ranges[i]._start) + num_chars_for_int(t_state.ranges[i]._end) + num_chars_for_cl + 2;
+          num_chars_for_int(t_state.ranges[i]._start) + num_chars_for_int(t_state.ranges[i]._end) + num_chars + 2;
         t_state.range_output_cl += t_state.ranges[i]._end - t_state.ranges[i]._start + 1;
         t_state.range_output_cl += 2;
       }
@@ -4252,19 +4321,17 @@ HttpSM::calculate_output_cl(int64_t num_chars_for_ct, int64_t num_chars_for_cl)
   }
 
   Debug("http_range", "Pre-calculated Content-Length for Range response is %" PRId64, t_state.range_output_cl);
+#endif
 }
 
 void
 HttpSM::do_range_parse(MIMEField *range_field)
 {
-  int num_chars_for_ct = 0;
-  t_state.cache_info.object_read->response_get()->value_get(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE, &num_chars_for_ct);
-
   int64_t content_length = t_state.cache_info.object_read->object_size_get();
   int64_t num_chars_for_cl = num_chars_for_int(content_length);
 
   parse_range_and_compare(range_field, content_length);
-  calculate_output_cl(num_chars_for_ct, num_chars_for_cl);
+  calculate_output_cl(content_length, num_chars_for_cl);
 }
 
 // this function looks for any Range: headers, parses them and either
@@ -4273,6 +4340,9 @@ HttpSM::do_range_parse(MIMEField *range_field)
 void
 HttpSM::do_range_setup_if_necessary()
 {
+#if 1
+  t_state.range_setup = HttpTransact::RANGE_NONE;
+#else
   MIMEField *field;
   INKVConnInternal *range_trans;
   int field_content_type_len = -1;
@@ -4310,6 +4380,7 @@ HttpSM::do_range_setup_if_necessary()
       }
     }
   }
+#endif
 }
 
 
@@ -5479,15 +5550,16 @@ HttpSM::perform_cache_write_action()
     break;
   }
 
-  case HttpTransact::CACHE_DO_WRITE:
-  case HttpTransact::CACHE_DO_REPLACE:
+  case HttpTransact::CACHE_DO_WRITE: {
     // Fix need to set up delete for after cache write has
     //   completed
+
     if (transform_info.entry == NULL || t_state.api_info.cache_untransformed == true) {
-      cache_sm.close_read();
       t_state.cache_info.write_status = HttpTransact::CACHE_WRITE_IN_PROGRESS;
       setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, client_response_hdr_bytes,
                                  "cache write");
+
+      cache_sm.close_read();
     } else {
       // We are not caching the untransformed.  We might want to
       //  use the cache writevc to cache the transformed copy
@@ -5496,7 +5568,7 @@ HttpSM::perform_cache_write_action()
       cache_sm.cache_write_vc = NULL;
     }
     break;
-
+  }
   default:
     ink_release_assert(0);
     break;
@@ -5719,7 +5791,8 @@ HttpSM::setup_cache_read_transfer()
 
   ink_assert(cache_sm.cache_read_vc != NULL);
 
-  doc_size = t_state.cache_info.object_read->object_size_get();
+  //  doc_size = t_state.cache_info.object_read->object_size_get();
+  doc_size = cache_sm.cache_read_vc->get_effective_content_size();
   alloc_index = buffer_size_to_index(doc_size + index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX));
 
 #ifndef USE_NEW_EMPTY_MIOBUFFER
@@ -5741,8 +5814,14 @@ HttpSM::setup_cache_read_transfer()
 
   HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::tunnel_handler);
 
-  if (doc_size != INT64_MAX)
+  if (doc_size != INT64_MAX) {
+    /* Brokenness - if the object was already in cache, @a doc_size already accounts for the range because
+       the CacheVC had a chance to adjust it on the way here, but if not then the read CacheVC isn't fully
+       set up and doesn't account for the range data, so we do it here. That needs to be rationalized.
+    */
+    doc_size = t_state.hdr_info.request_range.calcContentLength(doc_size, 0);
     doc_size += hdr_size;
+  }
 
   HttpTunnelProducer *p = tunnel.add_producer(cache_sm.cache_read_vc, doc_size, buf_start, &HttpSM::tunnel_handler_cache_read,
                                               HT_CACHE_READ, "cache read");
@@ -5795,15 +5874,26 @@ void
 HttpSM::setup_cache_write_transfer(HttpCacheSM *c_sm, VConnection *source_vc, HTTPInfo *store_info, int64_t skip_bytes,
                                    const char *name)
 {
+  bool partial_update_p = HttpTransact::RANGE_PARTIAL_UPDATE == t_state.range_setup;
   ink_assert(c_sm->cache_write_vc != NULL);
   ink_assert(t_state.request_sent_time > 0);
   ink_assert(t_state.response_received_time > 0);
+  ink_assert(store_info->valid() || partial_update_p);
+
+  if (!partial_update_p) {
+    store_info->request_sent_time_set(t_state.request_sent_time);
+    store_info->response_received_time_set(t_state.response_received_time);
 
-  store_info->request_sent_time_set(t_state.request_sent_time);
-  store_info->response_received_time_set(t_state.response_received_time);
+    if (t_state.hdr_info.response_range.isValid() && t_state.hdr_info.response_content_size != HTTP_UNDEFINED_CL)
+      store_info->object_size_set(t_state.hdr_info.response_content_size);
+
+    c_sm->cache_write_vc->set_http_info(store_info);
+    store_info->clear();
+  }
+
+  if (t_state.hdr_info.response_range.isValid())
+    c_sm->cache_write_vc->set_inbound_range(t_state.hdr_info.response_range._min, t_state.hdr_info.response_range._max);
 
-  c_sm->cache_write_vc->set_http_info(store_info);
-  store_info->clear();
 
   tunnel.add_consumer(c_sm->cache_write_vc, source_vc, &HttpSM::tunnel_handler_cache_write, HT_CACHE_WRITE, name, skip_bytes);
 
@@ -6155,7 +6245,7 @@ HttpSM::setup_transfer_from_transform_to_cache_only()
   return p;
 }
 
-void
+HttpTunnelProducer *
 HttpSM::setup_server_transfer_to_cache_only()
 {
   TunnelChunkingAction_t action;
@@ -6183,6 +6273,7 @@ HttpSM::setup_server_transfer_to_cache_only()
   setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, 0, "cache write");
 
   server_entry->in_tunnel = true;
+  return p;
 }
 
 HttpTunnelProducer *
@@ -6874,15 +6965,7 @@ HttpSM::set_next_state()
   case HttpTransact::SM_ACTION_DNS_LOOKUP: {
     sockaddr const *addr;
 
-    if ((strncmp(t_state.dns_info.lookup_name, "127.0.0.1", 9) == 0 || strncmp(t_state.dns_info.lookup_name, "::1", 3) == 0) &&
-        ats_ip_pton(t_state.dns_info.lookup_name, t_state.host_db_info.ip()) == 0) {
-      // If it's 127.0.0.1 or ::1 don't bother with hostdb
-      DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup for %s because it's loopback",
-              t_state.dns_info.lookup_name);
-      t_state.dns_info.lookup_success = true;
-      call_transact_and_set_next_state(NULL);
-      break;
-    } else if (t_state.api_server_addr_set) {
+    if (t_state.api_server_addr_set) {
       /* If the API has set the server address before the OS DNS lookup
        * then we can skip the lookup
        */
@@ -7008,6 +7091,16 @@ HttpSM::set_next_state()
     break;
   }
 
+  case HttpTransact::SM_ACTION_CACHE_OPEN_PARTIAL_READ: {
+#if 0
+      HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_cache_open_partial_read);
+      t_state.source = HttpTransact::SOURCE_CACHE;
+      pending_action = cache_sm.open_partial_read();
+#endif
+    ink_assert(!"[amc] Shouldn't get here");
+    break;
+  }
+
   case HttpTransact::SM_ACTION_SERVER_READ: {
     t_state.source = HttpTransact::SOURCE_HTTP_ORIGIN_SERVER;
 
@@ -7501,9 +7594,6 @@ HttpSM::redirect_request(const char *redirect_url, const int redirect_len)
         // the client request didn't have a host, so use the current origin host
         DebugSM("http_redirect", "[HttpSM::redirect_request] keeping client request host %s://%s", next_hop_scheme, origHost);
         char *origHost1 = strtok_r(origHost, ":", &saveptr);
-        if (origHost1 == NULL) {
-          goto LhostError;
-        }
         origHost_len = strlen(origHost1);
         int origHostPort_len = origHost_len;
         char buf[origHostPort_len + 7];
@@ -7537,7 +7627,6 @@ HttpSM::redirect_request(const char *redirect_url, const int redirect_len)
         t_state.hdr_info.client_request.m_target_cached = false;
         clientUrl.scheme_set(scheme_str, scheme_len);
       } else {
-      LhostError:
         // the server request didn't have a host, so remove it from the headers
         t_state.hdr_info.client_request.field_delete(MIME_FIELD_HOST, MIME_LEN_HOST);
       }
@@ -7641,3 +7730,14 @@ HttpSM::is_redirect_required()
   }
   return redirect_required;
 }
+
+char const *
+HttpSM::handlerName(int (HttpSM::*ptm)(int, void *))
+{
+  char const *zret = "*method*";
+  if (ptm == &HttpSM::tunnel_handler)
+    zret = "tunnel_handler";
+  else if (ptm == &HttpSM::state_cache_open_partial_read)
+    zret = "state_cache_open_partial_read";
+  return zret;
+}

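Note that the old in-SM range parsing above is compiled out; the patch instead parses the client's Range field into an HTTPRangeSpec in HttpTransact::check_request_validity() (see the HttpTransact.cc hunks below). For readers who want the gist of what that parsing involves, here is a minimal standalone sketch of the "bytes=a-b,c-,-d" grammar; it is illustrative only and shares no code with HTTPRangeSpec::parseRangeFieldValue().

    #include <cstdint>
    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    struct RangeSpec {
      int64_t start; // -1 means a suffix range ("-N": the last N bytes)
      int64_t end;   // -1 means open ended ("N-": from N to the end of the object)
    };

    // Parse "bytes=a-b,c-,-d" into RangeSpec entries; returns false on a syntax error.
    // (std::stoll is used for brevity; a production parser would reject non-numeric input itself.)
    static bool
    parse_ranges(const std::string &value, std::vector<RangeSpec> &out)
    {
      static const std::string prefix = "bytes=";
      if (value.compare(0, prefix.size(), prefix) != 0)
        return false;
      std::istringstream in(value.substr(prefix.size()));
      std::string part;
      while (std::getline(in, part, ',')) {
        std::size_t dash = part.find('-');
        if (dash == std::string::npos)
          return false;
        std::string lo = part.substr(0, dash);
        std::string hi = part.substr(dash + 1);
        RangeSpec r;
        r.start = lo.find_first_not_of(" \t") == std::string::npos ? -1 : std::stoll(lo);
        r.end = hi.find_first_not_of(" \t") == std::string::npos ? -1 : std::stoll(hi);
        if (r.start < 0 && r.end < 0)
          return false; // a bare "-" is not a valid range
        if (r.start >= 0 && r.end >= 0 && r.end < r.start)
          return false; // descending ranges are invalid
        out.push_back(r);
      }
      return !out.empty();
    }

    int
    main()
    {
      std::vector<RangeSpec> rs;
      if (parse_ranges("bytes=0-1023,2048-,-500", rs))
        for (const RangeSpec &r : rs)
          std::printf("[%lld, %lld]\n", (long long)r.start, (long long)r.end);
      return 0;
    }
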
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpSM.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h
index 47f98eb..5221659 100644
--- a/proxy/http/HttpSM.h
+++ b/proxy/http/HttpSM.h
@@ -254,6 +254,7 @@ public:
   // Debugging routines to dump the SM history, hdrs
   void dump_state_on_assert();
   void dump_state_hdr(HTTPHdr *h, const char *s);
+  char const *handlerName(int (HttpSM::*ptm)(int, void *));
 
   // Functions for manipulating api hooks
   void txn_hook_append(TSHttpHookID id, INKContInternal *cont);
@@ -379,6 +380,7 @@ protected:
   // Cache Handlers
   int state_cache_open_read(int event, void *data);
   int state_cache_open_write(int event, void *data);
+  int state_cache_open_partial_read(int event, void *data);
 
   // Http Server Handlers
   int state_http_server_open(int event, void *data);
@@ -448,7 +450,7 @@ protected:
   void setup_server_send_request();
   void setup_server_send_request_api();
   HttpTunnelProducer *setup_server_transfer();
-  void setup_server_transfer_to_cache_only();
+  HttpTunnelProducer *setup_server_transfer_to_cache_only();
   HttpTunnelProducer *setup_cache_read_transfer();
   void setup_internal_transfer(HttpSMHandler handler);
   void setup_error_transfer();

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransact.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc
index f925775..5a56f9c 100644
--- a/proxy/http/HttpTransact.cc
+++ b/proxy/http/HttpTransact.cc
@@ -47,8 +47,12 @@
 #include "HttpClientSession.h"
 #include "I_Machine.h"
 
-static char range_type[] = "multipart/byteranges; boundary=RANGE_SEPARATOR";
-#define RANGE_NUMBERS_LENGTH 60
+static char const HTTP_RANGE_MULTIPART_CONTENT_TYPE[] = "multipart/byteranges; boundary=";
+
+/// If the initial uncached segment is less than this, expand the request to include the earliest fragment.
+/// Hardwired for now, this needs to be promoted to a config var at some point. It should also be a multiple
+/// of the fragment size.
+static int64_t const MIN_INITIAL_UNCACHED = 4 * 1 << 20;
 
 #define HTTP_INCREMENT_TRANS_STAT(X) update_stat(s, X, 1);
 #define HTTP_SUM_TRANS_STAT(X, S) update_stat(s, X, (ink_statval_t)S);
@@ -1799,7 +1803,7 @@ HttpTransact::OSDNSLookup(State *s)
     } else {
       if ((s->cache_info.action == CACHE_DO_NO_ACTION) &&
           (((s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE) && !s->txn_conf->cache_range_write) ||
-            s->range_setup == RANGE_NOT_SATISFIABLE || s->range_setup == RANGE_NOT_HANDLED))) {
+            s->range_setup == RANGE_NOT_SATISFIABLE))) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
       } else if (!s->txn_conf->cache_http || s->cache_lookup_result == HttpTransact::CACHE_LOOKUP_SKIPPED) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, LookupSkipOpenServer);
@@ -1807,7 +1811,7 @@ HttpTransact::OSDNSLookup(State *s)
         // from the DNS we need to call LookupSkipOpenServer
       } else if (s->cache_lookup_result == CACHE_LOOKUP_HIT_FRESH || s->cache_lookup_result == CACHE_LOOKUP_HIT_WARNING ||
                  s->cache_lookup_result == CACHE_LOOKUP_HIT_STALE) {
-        // DNS lookup is done if the content is state need to call handle cache open read hit
+        // DNS lookup is done; if the content is stale we need to call HandleCacheOpenReadHit
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadHit);
       } else if (s->cache_lookup_result == CACHE_LOOKUP_MISS || s->cache_info.action == CACHE_DO_NO_ACTION) {
         TRANSACT_RETURN(SM_ACTION_API_OS_DNS, HandleCacheOpenReadMiss);
@@ -2556,6 +2560,7 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
   bool needs_cache_auth = false;
   bool server_up = true;
   CacheHTTPInfo *obj;
+  HTTPRangeSpec range;
 
   if (s->api_update_cached_object == HttpTransact::UPDATE_CACHED_OBJECT_CONTINUE) {
     obj = &s->cache_info.object_store;
@@ -2730,6 +2735,31 @@ HttpTransact::HandleCacheOpenReadHit(State *s)
       SET_VIA_STRING(VIA_DETAIL_CACHE_TYPE, VIA_DETAIL_CACHE);
     }
   }
+
+  // Check if we need to get some data from the origin.
+  if (s->state_machine->get_cache_sm().cache_read_vc->get_uncached(s->hdr_info.request_range, range, MIN_INITIAL_UNCACHED)) {
+    Debug("amc", "Request touches uncached fragments");
+    find_server_and_update_current_info(s);
+    if (!ats_is_ip(&s->current.server->addr)) {
+      if (s->current.request_to == PARENT_PROXY) {
+        TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup);
+      } else if (s->current.request_to == ORIGIN_SERVER) {
+        TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup);
+      } else {
+        ink_assert(!"[amc] - where was this going?");
+        return;
+      }
+    }
+    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->client_info.http_version, &range);
+    s->cache_info.action = CACHE_PREPARE_TO_WRITE;
+    s->range_setup = RANGE_PARTIAL_UPDATE;
+    s->next_action = how_to_open_connection(s);
+    if (s->stale_icp_lookup && s->next_action == SM_ACTION_ORIGIN_SERVER_OPEN) {
+      s->next_action = SM_ACTION_ICP_QUERY;
+    }
+    return;
+  }
+
   // cache hit, document is fresh, does not require authorization,
   // is valid, etc. etc. send it back to the client.
   //
@@ -2850,7 +2880,7 @@ HttpTransact::build_response_from_cache(State *s, HTTPWarningCode warning_code)
       // send back the full document to the client.
       DebugTxn("http_trans", "[build_response_from_cache] Match! Serving full document.");
       s->cache_info.action = CACHE_DO_SERVE;
-
+#if 0
       // Check if cached response supports Range. If it does, append
       // Range transformation plugin
       // only if the cached response is a 200 OK
@@ -2885,6 +2915,10 @@ HttpTransact::build_response_from_cache(State *s, HTTPWarningCode warning_code)
         build_response(s, cached_response, &s->hdr_info.client_response, s->client_info.http_version);
       }
       s->next_action = SM_ACTION_SERVE_FROM_CACHE;
+#else
+      build_response(s, cached_response, &s->hdr_info.client_response, s->client_info.http_version);
+      s->next_action = SM_ACTION_SERVE_FROM_CACHE;
+#endif
     }
     // If the client request is a HEAD, then serve the header from cache.
     else if (s->method == HTTP_WKSIDX_HEAD) {
@@ -3072,12 +3106,16 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
   // We must, however, not cache the responses to these requests.
   if (does_method_require_cache_copy_deletion(s->http_config_param, s->method) && s->api_req_cacheable == false) {
     s->cache_info.action = CACHE_DO_NO_ACTION;
+#if 0
   } else if ((s->hdr_info.client_request.presence(MIME_PRESENCE_RANGE) && !s->txn_conf->cache_range_write) ||
              does_method_effect_cache(s->method) == false || s->range_setup == RANGE_NOT_SATISFIABLE ||
              s->range_setup == RANGE_NOT_HANDLED) {
     s->cache_info.action = CACHE_DO_NO_ACTION;
+#endif
   } else {
     s->cache_info.action = CACHE_PREPARE_TO_WRITE;
+    if (s->hdr_info.request_range.hasRanges())
+      s->range_setup = RANGE_PARTIAL_WRITE;
   }
 
   // We should not issue an ICP lookup if the request has a
@@ -3119,8 +3157,8 @@ HttpTransact::HandleCacheOpenReadMiss(State *s)
         return;
       }
     }
-    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version);
-
+    build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version,
+                  &s->hdr_info.request_range);
     s->next_action = how_to_open_connection(s);
   } else { // miss, but only-if-cached is set
     build_error_response(s, HTTP_STATUS_GATEWAY_TIMEOUT, "Not Cached", "cache#not_in_cache", NULL);
@@ -4102,6 +4140,7 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
   HTTPStatus client_response_code = HTTP_STATUS_NONE;
   const char *warn_text = NULL;
   bool cacheable = false;
+  HTTPRangeSpec ranges;
 
   cacheable = is_response_cacheable(s, &s->hdr_info.client_request, &s->hdr_info.server_response);
   DebugTxn("http_trans", "[hcoofsr] response %s cacheable", cacheable ? "is" : "is not");
@@ -4317,7 +4356,12 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     }
 
     s->next_action = SM_ACTION_SERVER_READ;
-    client_response_code = server_response_code;
+    // If we got back 206 but the original request wasn't partial, then we're doing a partial update and need to return 200.
+    // Need to strip Content-Range at some point as well.
+    if (HTTP_STATUS_PARTIAL_CONTENT == server_response_code && s->hdr_info.request_range.isEmpty())
+      client_response_code = HTTP_STATUS_OK;
+    else
+      client_response_code = server_response_code;
     base_response = &s->hdr_info.server_response;
 
     s->negative_caching = is_negative_caching_appropriate(s) && cacheable;
@@ -4423,6 +4467,19 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     break;
   }
 
+
+#if 0
+  /* If we plan to do a write and the request was partial, then we need to open a
+     cache read to service the request and not just pass through.
+  */
+  if (SM_ACTION_SERVER_READ == s->next_action &&
+      CACHE_DO_WRITE == s->cache_info.action &&
+      s->hdr_info.request_range.hasRanges()
+  ) {
+    s->next_action = SM_ACTION_CACHE_OPEN_PARTIAL_READ;
+  }
+#endif
+
   // update stat, set via string, etc
 
   switch (s->cache_info.action) {
@@ -4481,7 +4538,9 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
   }
   ink_assert(base_response->valid());
 
-  if ((s->cache_info.action == CACHE_DO_WRITE) || (s->cache_info.action == CACHE_DO_REPLACE)) {
+  if (((s->cache_info.action == CACHE_DO_WRITE) || (s->cache_info.action == CACHE_DO_REPLACE)) &&
+      s->range_setup != RANGE_PARTIAL_UPDATE) {
+    // If it's a partial write then we already have the cached headers, no need to pass these in.
     set_headers_for_cache_write(s, &s->cache_info.object_store, &s->hdr_info.server_request, &s->hdr_info.server_response);
   }
   // 304, 412, and 416 responses are handled here
@@ -4525,6 +4584,9 @@ HttpTransact::handle_cache_operation_on_forward_server_response(State *s)
     if (((s->next_action == SM_ACTION_SERVE_FROM_CACHE) || (s->next_action == SM_ACTION_SERVER_READ)) &&
         s->state_machine->do_transform_open()) {
       set_header_for_transform(s, base_response);
+    } else if (s->hdr_info.request_range.isEmpty() && s->cache_info.object_read->valid()) {
+      build_response(s, s->cache_info.object_read->response_get(), &s->hdr_info.client_response, s->client_info.http_version);
+      s->hdr_info.client_response.set_content_length(s->cache_info.object_read->object_size_get());
     } else {
       build_response(s, base_response, &s->hdr_info.client_response, s->client_info.http_version, client_response_code);
     }
@@ -4831,6 +4893,12 @@ HttpTransact::set_headers_for_cache_write(State *s, HTTPInfo *cache_info, HTTPHd
   cache_info->request_get()->field_delete(MIME_FIELD_VIA, MIME_LEN_VIA);
   // server 200 Ok for Range request
   cache_info->request_get()->field_delete(MIME_FIELD_RANGE, MIME_LEN_RANGE);
+  if (NULL != cache_info->response_get()->field_find(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE)) {
+    cache_info->response_get()->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    cache_info->response_get()->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
+    cache_info->response_get()->status_set(HTTP_STATUS_OK);
+    cache_info->response_get()->reason_set(http_hdr_reason_lookup(HTTP_STATUS_OK), strlen(http_hdr_reason_lookup(HTTP_STATUS_OK)));
+  }
 
   // If we're ignoring auth, then we don't want to cache WWW-Auth
   //  headers
@@ -5205,6 +5273,8 @@ HttpTransact::add_client_ip_to_outgoing_request(State *s, HTTPHdr *request)
 HttpTransact::RequestError_t
 HttpTransact::check_request_validity(State *s, HTTPHdr *incoming_hdr)
 {
+  MIMEField *f; // temp for field checks.
+
   if (incoming_hdr == 0) {
     return NON_EXISTANT_REQUEST_HEADER;
   }
@@ -5324,6 +5394,14 @@ HttpTransact::check_request_validity(State *s, HTTPHdr *incoming_hdr)
     }
   }
 
+  if (0 != (f = incoming_hdr->field_find(MIME_FIELD_RANGE, MIME_LEN_RANGE))) {
+    int len;
+    char const *val = f->value_get(&len);
+    if (!s->hdr_info.request_range.parseRangeFieldValue(val, len))
+      return INVALID_RANGE_FIELD;
+  }
+
+
   return NO_REQUEST_HEADER_ERROR;
 }
 
@@ -5669,6 +5747,8 @@ HttpTransact::initialize_state_variables_from_request(State *s, HTTPHdr *obsolet
 void
 HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incoming_response)
 {
+  MIMEField *field;
+
   /* check if the server permits caching */
   s->cache_info.directives.does_server_permit_storing =
     HttpTransactHeaders::does_server_allow_response_to_be_stored(&s->hdr_info.server_response);
@@ -5708,8 +5788,7 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
     // This code used to discriminate CL: headers when the origin disabled keep-alive.
     if (incoming_response->presence(MIME_PRESENCE_CONTENT_LENGTH)) {
       int64_t cl = incoming_response->get_content_length();
-
-      s->hdr_info.response_content_length = (cl >= 0) ? cl : HTTP_UNDEFINED_CL;
+      s->hdr_info.response_content_length = cl < 0 ? HTTP_UNDEFINED_CL : cl;
       s->hdr_info.trust_response_cl = true;
     } else {
       s->hdr_info.response_content_length = HTTP_UNDEFINED_CL;
@@ -5718,8 +5797,7 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
   }
 
   if (incoming_response->presence(MIME_PRESENCE_TRANSFER_ENCODING)) {
-    MIMEField *field = incoming_response->field_find(MIME_FIELD_TRANSFER_ENCODING, MIME_LEN_TRANSFER_ENCODING);
-    ink_assert(field != NULL);
+    field = incoming_response->field_find(MIME_FIELD_TRANSFER_ENCODING, MIME_LEN_TRANSFER_ENCODING);
 
     HdrCsvIter enc_val_iter;
     int enc_val_len;
@@ -5780,6 +5858,15 @@ HttpTransact::initialize_state_variables_from_response(State *s, HTTPHdr *incomi
     }
   }
 
+  // Get the incoming range to store from the origin.
+  if (NULL != (field = incoming_response->field_find(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE))) {
+    int len;
+    char const *cr = field->value_get(&len);
+    s->hdr_info.response_content_size =
+      HTTPRangeSpec::parseContentRangeFieldValue(cr, len, s->hdr_info.response_range, s->hdr_info.response_range_boundary);
+  }
+
+
   s->current.server->transfer_encoding = NO_TRANSFER_ENCODING;
 }
 
@@ -6087,9 +6174,6 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
                            "request is not cache lookupable, response is not cachable");
     return false;
   }
-  // already has a fresh copy in the cache
-  if (s->range_setup == RANGE_NOT_HANDLED)
-    return false;
 
   // Check whether the response is cachable based on its cookie
   // If there are cookies in response but a ttl is set, allow caching
@@ -6177,11 +6261,20 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
     }
   }
   // do not cache partial content - Range response
-  if (response_code == HTTP_STATUS_PARTIAL_CONTENT || response_code == HTTP_STATUS_RANGE_NOT_SATISFIABLE) {
+  if (response_code == HTTP_STATUS_RANGE_NOT_SATISFIABLE) {
     DebugTxn("http_trans", "[is_response_cacheable] "
                            "response code %d - don't cache",
              response_code);
     return false;
+  } else if (response->presence(MIME_PRESENCE_CONTENT_RANGE) && !s->hdr_info.response_range.isValid()) {
+    if (0 <= s->hdr_info.response_content_size) {
+      DebugTxn("http_trans", "[is_response_cacheable] "
+                             "Content-Range header present with unsatisfiable range");
+    } else {
+      DebugTxn("http_trans", "[is_response_cacheable] "
+                             "Content-Range header present but unparsable");
+    }
+    return false;
   }
 
   // check if cache control overrides default cacheability
@@ -6242,8 +6335,9 @@ HttpTransact::is_response_cacheable(State *s, HTTPHdr *request, HTTPHdr *respons
   // default cacheability
   if (!s->txn_conf->negative_caching_enabled) {
     if ((response_code == HTTP_STATUS_OK) || (response_code == HTTP_STATUS_NOT_MODIFIED) ||
-        (response_code == HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION) || (response_code == HTTP_STATUS_MOVED_PERMANENTLY) ||
-        (response_code == HTTP_STATUS_MULTIPLE_CHOICES) || (response_code == HTTP_STATUS_GONE)) {
+        (response_code == HTTP_STATUS_PARTIAL_CONTENT) || (response_code == HTTP_STATUS_NON_AUTHORITATIVE_INFORMATION) ||
+        (response_code == HTTP_STATUS_MOVED_PERMANENTLY) || (response_code == HTTP_STATUS_MULTIPLE_CHOICES) ||
+        (response_code == HTTP_STATUS_GONE)) {
       DebugTxn("http_trans", "[is_response_cacheable] YES by default ");
       return true;
     } else {
@@ -6380,6 +6474,11 @@ HttpTransact::is_request_valid(State *s, HTTPHdr *incoming_request)
     build_error_response(s, HTTP_STATUS_BAD_REQUEST, "Invalid Content Length", "request#invalid_content_length", NULL);
     return false;
   }
+  case INVALID_RANGE_FIELD: {
+    DebugTxn("http_trans", "[is_request_valid] a Range field was present with an invalid range specification");
+    SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD);
+    build_error_response(s, HTTP_STATUS_BAD_REQUEST, "Invalid Range", "request#syntax_error", NULL);
+    return false;
+  }
   default:
     return true;
   }
@@ -6652,6 +6751,7 @@ void
 HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *base)
 {
   int64_t cl = HTTP_UNDEFINED_CL;
+
   ink_assert(header->type_get() == HTTP_TYPE_RESPONSE);
   if (base->presence(MIME_PRESENCE_CONTENT_LENGTH)) {
     cl = base->get_content_length();
@@ -6663,13 +6763,16 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
       case SOURCE_HTTP_ORIGIN_SERVER:
         // We made our decision about whether to trust the
         //   response content length in init_state_vars_from_response()
-        if (s->range_setup != HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED)
-          break;
+        if (s->hdr_info.request_range.hasRanges()) {
+          change_response_header_because_of_range_request(s, header);
+          s->hdr_info.trust_response_cl = true;
+        }
+        break;
 
       case SOURCE_CACHE:
         // if we are doing a single Range: request, calculate the new
         // C-L: header
-        if (s->range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
+        if (s->hdr_info.request_range.hasRanges()) {
           change_response_header_because_of_range_request(s, header);
           s->hdr_info.trust_response_cl = true;
         }
@@ -6689,7 +6792,7 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         break;
 
       case SOURCE_TRANSFORM:
-        if (s->range_setup == HttpTransact::RANGE_REQUESTED) {
+        if (s->hdr_info.request_range.hasRanges()) {
           header->set_content_length(s->range_output_cl);
           s->hdr_info.trust_response_cl = true;
         } else if (s->hdr_info.transform_response_cl == HTTP_UNDEFINED_CL) {
@@ -6722,7 +6825,7 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         s->hdr_info.trust_response_cl = false;
         s->hdr_info.request_content_length = HTTP_UNDEFINED_CL;
         ink_assert(s->range_setup == RANGE_NONE);
-      } else if (s->range_setup == RANGE_NOT_TRANSFORM_REQUESTED) {
+      } else if (s->hdr_info.response_range.isValid()) {
         // if we are doing a single Range: request, calculate the new
         // C-L: header
         change_response_header_because_of_range_request(s, header);
@@ -6744,7 +6847,6 @@ HttpTransact::handle_content_length_header(State *s, HTTPHdr *header, HTTPHdr *b
         s->hdr_info.trust_response_cl = false;
       }
       header->field_delete(MIME_FIELD_CONTENT_LENGTH, MIME_LEN_CONTENT_LENGTH);
-      ink_assert(s->range_setup != RANGE_NOT_TRANSFORM_REQUESTED);
     }
   }
   return;
@@ -7691,7 +7793,8 @@ HttpTransact::is_request_likely_cacheable(State *s, HTTPHdr *request)
 }
 
 void
-HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version)
+HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version,
+                            HTTPRangeSpec const *ranges)
 {
   // this part is to restore the original URL in case, multiple cache
   // lookups have happened - client request has been changed as the result
@@ -7718,6 +7821,8 @@ HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_r
   HttpTransactHeaders::remove_privacy_headers_from_request(s->http_config_param, s->txn_conf, outgoing_request);
   HttpTransactHeaders::add_global_user_agent_header_to_request(s->txn_conf, outgoing_request);
   handle_request_keep_alive_headers(s, outgoing_version, outgoing_request);
+  if (ranges)
+    HttpTransactHeaders::insert_request_range_header(outgoing_request, ranges);
 
   // handle_conditional_headers appears to be obsolete.  Nothing happens
   // unelss s->cache_info.action == HttpTransact::CACHE_DO_UPDATE.  In that
@@ -7848,7 +7953,8 @@ HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing
   if (base_response == NULL) {
     HttpTransactHeaders::build_base_response(outgoing_response, status_code, reason_phrase, strlen(reason_phrase), s->current.now);
   } else {
-    if ((status_code == HTTP_STATUS_NONE) || (status_code == base_response->status_get())) {
+    if ((status_code == HTTP_STATUS_NONE) || (status_code == base_response->status_get()) ||
+        (HTTP_STATUS_OK == status_code && HTTP_STATUS_PARTIAL_CONTENT == base_response->status_get())) {
       HttpTransactHeaders::copy_header_fields(base_response, outgoing_response, s->txn_conf->fwd_proxy_auth_to_parent);
 
       if (s->txn_conf->insert_age_in_response)
@@ -7862,6 +7968,7 @@ HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing
       //  before processing the keep_alive headers
       //
       handle_content_length_header(s, outgoing_response, base_response);
+
     } else
       switch (status_code) {
       case HTTP_STATUS_NOT_MODIFIED:
@@ -9005,8 +9112,12 @@ HttpTransact::delete_warning_value(HTTPHdr *to_warn, HTTPWarningCode warning_cod
 void
 HttpTransact::change_response_header_because_of_range_request(State *s, HTTPHdr *header)
 {
-  MIMEField *field;
+  MIMEField *field = header->field_find(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
   char *reason_phrase;
+  //  CacheVConnection* cache_read_vc = s->state_machine->get_cache_sm().cache_read_vc;
+  //  HTTPHdr* cached_response = find_appropriate_cached_resp(s);
+  //  HTTPRangeSpec& rs = cache_read_vc->get_http_range_spec();
+  HTTPRangeSpec &rs = s->state_machine->t_state.hdr_info.request_range;
 
   Debug("http_trans", "Partial content requested, re-calculating content-length");
 
@@ -9015,36 +9126,34 @@ HttpTransact::change_response_header_because_of_range_request(State *s, HTTPHdr
   header->reason_set(reason_phrase, strlen(reason_phrase));
 
   // set the right Content-Type for multiple entry Range
-  if (s->num_range_fields > 1) {
-    field = header->field_find(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
+  if (rs.isMulti()) { // means we need a boundary string.
+    ink_release_assert(!"[amc] Computation of boundary string not working correctly");
+#if 0
+    int rbs_len;
+    char const* rbs = cache_read_vc->get_http_range_boundary_string(&rbs_len);
+    char buff[(sizeof(HTTP_RANGE_MULTIPART_CONTENT_TYPE)-1) + HTTP_RANGE_BOUNDARY_LEN];
 
     if (field != NULL)
       header->field_delete(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
 
     field = header->field_create(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE);
-    field->value_append(header->m_heap, header->m_mime, range_type, sizeof(range_type) - 1);
+    snprintf(buff, sizeof(buff), "%s%.*s", HTTP_RANGE_MULTIPART_CONTENT_TYPE, rbs_len, rbs);
+    field->value_append(header->m_heap, header->m_mime, buff, sizeof(buff));
 
     header->field_attach(field);
-    // TODO: There's a known bug here where the Content-Length is not correct for multi-part
-    // Range: requests.
-    header->set_content_length(s->range_output_cl);
-  } else {
-    if (s->cache_info.object_read && s->cache_info.object_read->valid()) {
-      // TODO: It's unclear under which conditions we need to update the Content-Range: header,
-      // many times it's already set correctly before calling this. For now, always try do it
-      // when we have the information for it available.
-      // TODO: Also, it's unclear as to why object_read->valid() is not always true here.
-      char numbers[RANGE_NUMBERS_LENGTH];
-      header->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
-      field = header->field_create(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
-      snprintf(numbers, sizeof(numbers), "bytes %" PRId64 "-%" PRId64 "/%" PRId64, s->ranges[0]._start, s->ranges[0]._end,
-               s->cache_info.object_read->object_size_get());
-      field->value_set(header->m_heap, header->m_mime, numbers, strlen(numbers));
-      header->field_attach(field);
-    }
-    // Always update the Content-Length: header.
-    header->set_content_length(s->range_output_cl);
+#endif
+  } else if (rs.isSingle()) {
+    int n;
+    char buff[HTTP_LEN_BYTES + (18 + 1) * 3];
+    header->field_delete(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    field = header->field_create(MIME_FIELD_CONTENT_RANGE, MIME_LEN_CONTENT_RANGE);
+    n = snprintf(buff, sizeof(buff), "%s %" PRIu64 "-%" PRIu64 "/%" PRId64, HTTP_VALUE_BYTES, rs[0]._min, rs[0]._max,
+                 s->state_machine->t_state.hdr_info.response_content_size);
+    field->value_set(header->m_heap, header->m_mime, buff, n);
+    header->field_attach(field);
+    header->set_content_length(rs.size());
   }
+  //  header->set_content_length(cache_read_vc->get_effective_content_size());
 }
 
 #if TS_HAS_TESTS

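To make the single-range path in change_response_header_because_of_range_request() concrete: for a satisfiable single range [first, last] of an object of size bytes, the proxy emits "Content-Range: bytes first-last/size" and a Content-Length equal to the span. A standalone worked example follows (arbitrary numbers, no ATS types):

    #include <cstdint>
    #include <cstdio>

    int
    main()
    {
      // A client asked for "Range: bytes=1000-4999" of a 1,000,000 byte object.
      int64_t first = 1000, last = 4999, object_size = 1000000;

      char content_range[64];
      std::snprintf(content_range, sizeof(content_range), "bytes %lld-%lld/%lld",
                    (long long)first, (long long)last, (long long)object_size);
      int64_t content_length = last - first + 1; // 4000 bytes of body

      std::printf("HTTP/1.1 206 Partial Content\n");
      std::printf("Content-Range: %s\n", content_range);
      std::printf("Content-Length: %lld\n", (long long)content_length);
      return 0;
    }
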
http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransact.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h
index fa64940..6d53231 100644
--- a/proxy/http/HttpTransact.h
+++ b/proxy/http/HttpTransact.h
@@ -21,7 +21,6 @@
   limitations under the License.
  */
 
-
 #if !defined(_HttpTransact_h_)
 #define _HttpTransact_h_
 
@@ -53,44 +52,37 @@
 #define ACQUIRE_PRINT_LOCK() // ink_mutex_acquire(&print_lock);
 #define RELEASE_PRINT_LOCK() // ink_mutex_release(&print_lock);
 
-#define DUMP_HEADER(T, H, I, S)                                 \
-  {                                                             \
-    if (diags->on(T)) {                                         \
-      ACQUIRE_PRINT_LOCK()                                      \
-      fprintf(stderr, "+++++++++ %s +++++++++\n", S);           \
-      fprintf(stderr, "-- State Machine Id: %" PRId64 "\n", I); \
-      char b[4096];                                             \
-      int used, tmp, offset;                                    \
-      int done;                                                 \
-      offset = 0;                                               \
-      if ((H)->valid()) {                                       \
-        do {                                                    \
-          used = 0;                                             \
-          tmp = offset;                                         \
-          done = (H)->print(b, 4095, &used, &tmp);              \
-          offset += used;                                       \
-          b[used] = '\0';                                       \
-          fprintf(stderr, "%s", b);                             \
-        } while (!done);                                        \
-      }                                                         \
-      RELEASE_PRINT_LOCK()                                      \
-    }                                                           \
+#define DUMP_HEADER(T, H, I, S)                                            \
+  {                                                                        \
+    if (diags->on(T)) {                                                    \
+      ACQUIRE_PRINT_LOCK() fprintf(stderr, "+++++++++ %s +++++++++\n", S); \
+      fprintf(stderr, "-- State Machine Id: %" PRId64 "\n", I);            \
+      char b[4096];                                                        \
+      int used, tmp, offset;                                               \
+      int done;                                                            \
+      offset = 0;                                                          \
+      if ((H)->valid()) {                                                  \
+        do {                                                               \
+          used = 0;                                                        \
+          tmp = offset;                                                    \
+          done = (H)->print(b, 4095, &used, &tmp);                         \
+          offset += used;                                                  \
+          b[used] = '\0';                                                  \
+          fprintf(stderr, "%s", b);                                        \
+        } while (!done);                                                   \
+      }                                                                    \
+      RELEASE_PRINT_LOCK()                                                 \
+    }                                                                      \
   }
 
-
 #define TRANSACT_SETUP_RETURN(n, r) \
   s->next_action = n;               \
   s->transact_return_point = r;     \
   DebugSpecific((s->state_machine && s->state_machine->debug_on), "http_trans", "Next action %s; %s", #n, #r);
 
-#define TRANSACT_RETURN(n, r) \
-  TRANSACT_SETUP_RETURN(n, r) \
-  return;
-
-#define TRANSACT_RETURN_VAL(n, r, v) \
-  TRANSACT_SETUP_RETURN(n, r)        \
-  return v;
+#define TRANSACT_RETURN(n, r) TRANSACT_SETUP_RETURN(n, r) return;
 
+#define TRANSACT_RETURN_VAL(n, r, v) TRANSACT_SETUP_RETURN(n, r) return v;
 
 #define SET_UNPREPARE_CACHE_ACTION(C)                               \
   {                                                                 \
@@ -377,6 +369,7 @@ public:
     SCHEME_NOT_SUPPORTED,
     UNACCEPTABLE_TE_REQUIRED,
     INVALID_POST_CONTENT_LENGTH,
+    INVALID_RANGE_FIELD,
     TOTAL_REQUEST_ERROR_TYPES
   };
 
@@ -446,22 +439,18 @@ public:
     // SM_ACTION_AUTH_LOOKUP,
     SM_ACTION_DNS_LOOKUP,
     SM_ACTION_DNS_REVERSE_LOOKUP,
-
     SM_ACTION_CACHE_LOOKUP,
     SM_ACTION_CACHE_ISSUE_WRITE,
     SM_ACTION_CACHE_ISSUE_WRITE_TRANSFORM,
     SM_ACTION_CACHE_PREPARE_UPDATE,
     SM_ACTION_CACHE_ISSUE_UPDATE,
-
+    SM_ACTION_CACHE_OPEN_PARTIAL_READ,
     SM_ACTION_ICP_QUERY,
-
     SM_ACTION_ORIGIN_SERVER_OPEN,
     SM_ACTION_ORIGIN_SERVER_RAW_OPEN,
     SM_ACTION_ORIGIN_SERVER_RR_MARK_DOWN,
-
     SM_ACTION_READ_PUSH_HDR,
     SM_ACTION_STORE_PUSH_BODY,
-
     SM_ACTION_INTERNAL_CACHE_DELETE,
     SM_ACTION_INTERNAL_CACHE_NOOP,
     SM_ACTION_INTERNAL_CACHE_UPDATE_HEADERS,
@@ -473,14 +462,12 @@ public:
 #ifdef PROXY_DRAIN
     SM_ACTION_DRAIN_REQUEST_BODY,
 #endif /* PROXY_DRAIN */
-
     SM_ACTION_SERVE_FROM_CACHE,
     SM_ACTION_SERVER_READ,
     SM_ACTION_SERVER_PARSE_NEXT_HDR,
     SM_ACTION_TRANSFORM_READ,
     SM_ACTION_SSL_TUNNEL,
     SM_ACTION_CONTINUE,
-
     SM_ACTION_API_SM_START,
     SM_ACTION_API_READ_REQUEST_HDR,
     SM_ACTION_API_PRE_REMAP,
@@ -492,7 +479,6 @@ public:
     SM_ACTION_API_READ_RESPONSE_HDR,
     SM_ACTION_API_SEND_RESPONSE_HDR,
     SM_ACTION_API_SM_SHUTDOWN,
-
     SM_ACTION_REMAP_REQUEST,
     SM_ACTION_POST_REMAP_SKIP,
     SM_ACTION_REDIRECT_READ
@@ -544,10 +530,9 @@ public:
 
   enum RangeSetup_t {
     RANGE_NONE = 0,
-    RANGE_REQUESTED,
     RANGE_NOT_SATISFIABLE,
-    RANGE_NOT_HANDLED,
-    RANGE_NOT_TRANSFORM_REQUESTED,
+    RANGE_PARTIAL_WRITE,  ///< Cache a range request.
+    RANGE_PARTIAL_UPDATE, ///< Update an existing object with a range request.
   };
 
   enum CacheAuth_t {
@@ -760,9 +745,15 @@ public:
     HTTPHdr transform_response;
     HTTPHdr cache_response;
     int64_t request_content_length;
-    int64_t response_content_length;
+    int64_t response_content_length; // Length of the payload (Content-Length
+                                     // field)
+    int64_t response_content_size;   // Total size of the object on the origin
+                                     // server.
     int64_t transform_request_cl;
     int64_t transform_response_cl;
+    HTTPRangeSpec request_range;
+    HTTPRangeSpec::Range response_range;
+    ts::ConstBuffer response_range_boundary; // not used yet
     bool client_req_is_server_style;
     bool trust_response_cl;
     ResponseError_t response_error;
@@ -786,7 +777,6 @@ public:
     _SquidLogInfo() : log_code(SQUID_LOG_ERR_UNKNOWN), hier_code(SQUID_HIER_EMPTY), hit_miss_code(SQUID_MISS_NONE) {}
   } SquidLogInfo;
 
-
 #define HTTP_TRANSACT_STATE_MAX_XBUF_SIZE (1024 * 2) /* max size of plugin exchange buffer */
 
   struct State {
@@ -842,7 +832,8 @@ public:
     StateMachineAction_t api_next_action;                  // out
     void (*transact_return_point)(HttpTransact::State *s); // out
 
-    // We keep this so we can jump back to the upgrade handler after remap is complete
+    // We keep this so we can jump back to the upgrade handler after remap is
+    // complete
     bool is_upgrade_request;
     void (*post_remap_upgrade_return_point)(HttpTransact::State *s); // out
     const char *upgrade_token_wks;
@@ -903,7 +894,8 @@ public:
     int api_txn_no_activity_timeout_value;
 
     // Used by INKHttpTxnCachedReqGet and INKHttpTxnCachedRespGet SDK functions
-    // to copy part of HdrHeap (only the writable portion) for cached response headers
+    // to copy part of HdrHeap (only the writable portion) for cached response
+    // headers
     // and request headers
     // These ptrs are deallocate when transaction is over.
     HdrHeapSDKHandle *cache_req_hdr_heap_handle;
@@ -951,7 +943,8 @@ public:
     RangeRecord *ranges;
 
     OverridableHttpConfigParams *txn_conf;
-    OverridableHttpConfigParams my_txn_conf; // Storage for plugins, to avoid malloc
+    OverridableHttpConfigParams my_txn_conf; // Storage for plugins, to avoid
+                                             // malloc
 
     bool transparent_passthrough;
     bool range_in_cache;
@@ -1218,7 +1211,8 @@ public:
   static bool will_this_request_self_loop(State *s);
   static bool is_request_likely_cacheable(State *s, HTTPHdr *request);
 
-  static void build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version);
+  static void build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_request, HTTPVersion outgoing_version,
+                            HTTPRangeSpec const *ranges = 0);
   static void build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing_response, HTTPVersion outgoing_version,
                              HTTPStatus status_code, const char *reason_phrase = NULL);
   static void build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing_response, HTTPVersion outgoing_version);
@@ -1260,7 +1254,8 @@ public:
   static void client_result_stat(State *s, ink_hrtime total_time, ink_hrtime request_process_time);
   static void add_new_stat_block(State *s);
   static void delete_warning_value(HTTPHdr *to_warn, HTTPWarningCode warning_code);
-  static bool is_connection_collapse_checks_success(State *s); // YTS Team, yamsat
+  static bool is_connection_collapse_checks_success(State *s); // YTS Team,
+                                                               // yamsat
 };
 
 typedef void (*TransactEntryFunc_t)(HttpTransact::State *s);
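
The reworked DUMP_HEADER macro above prints a header in 4 KB chunks by calling
HTTPHdr::print() repeatedly until it reports completion. The same loop, written
out as a standalone helper for readability (a sketch only; the function name
and the stderr sink are illustrative, not part of the patch):

#include <cstdio>
#include "HTTP.h" // assumed to provide HTTPHdr

// Print an HTTPHdr to stderr in 4 KB chunks, mirroring the DUMP_HEADER loop.
// print() returns non-zero once the whole header has been emitted; 'used' is
// the number of bytes written into the buffer on this pass and 'offset' is the
// running position within the serialized header.
static void
dump_header_sketch(HTTPHdr *hdr)
{
  if (!hdr->valid())
    return;
  char b[4096];
  int used, tmp, offset = 0, done;
  do {
    used = 0;
    tmp  = offset;
    done = hdr->print(b, 4095, &used, &tmp);
    offset += used;
    b[used] = '\0';
    fprintf(stderr, "%s", b);
  } while (!done);
}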

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransactHeaders.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransactHeaders.cc b/proxy/http/HttpTransactHeaders.cc
index 28cdffc..0c700e3 100644
--- a/proxy/http/HttpTransactHeaders.cc
+++ b/proxy/http/HttpTransactHeaders.cc
@@ -1036,3 +1036,16 @@ HttpTransactHeaders::remove_privacy_headers_from_request(HttpConfigParams *http_
     }
   }
 }
+
+void
+HttpTransactHeaders::insert_request_range_header(HTTPHdr *header, HTTPRangeSpec const *ranges)
+{
+  int n;
+  char buff[1024];
+
+  if (ranges->hasRanges()) {
+    int64_t ffs = cacheProcessor.get_fixed_fragment_size();
+    n = ranges->print_quantized(buff, sizeof(buff), ffs, ffs);
+    header->value_set(MIME_FIELD_RANGE, MIME_LEN_RANGE, buff, n);
+  }
+}
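
A hypothetical call-site sketch for the new insert_request_range_header()
above. The names follow the State fields added elsewhere in this patch, and the
rounding behaviour of HTTPRangeSpec::print_quantized() is assumed (its
arguments suggest it aligns the requested spans to the cache's fixed fragment
size):

// Sketch: when the transaction carries a parsed range set, rewrite the Range
// field on the outgoing request so the upstream fetch is aligned to cache
// fragment boundaries. 's' and 'outgoing_request' are illustrative names.
if (s->hdr_info.request_range.hasRanges()) {
  HttpTransactHeaders::insert_request_range_header(outgoing_request, &s->hdr_info.request_range);
}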

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTransactHeaders.h
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTransactHeaders.h b/proxy/http/HttpTransactHeaders.h
index 505a6fa..c4d1b92 100644
--- a/proxy/http/HttpTransactHeaders.h
+++ b/proxy/http/HttpTransactHeaders.h
@@ -85,6 +85,7 @@ public:
   static void remove_privacy_headers_from_request(HttpConfigParams *http_config_param, OverridableHttpConfigParams *http_txn_conf,
                                                   HTTPHdr *header);
 
+  static void insert_request_range_header(HTTPHdr *header, HTTPRangeSpec const *ranges);
   static int nstrcpy(char *d, const char *as);
 };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http/HttpTunnel.cc
----------------------------------------------------------------------
diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc
index 571d512..887bf77 100644
--- a/proxy/http/HttpTunnel.cc
+++ b/proxy/http/HttpTunnel.cc
@@ -830,13 +830,7 @@ HttpTunnel::producer_run(HttpTunnelProducer *p)
     }
   }
 
-  int64_t read_start_pos = 0;
-  if (p->vc_type == HT_CACHE_READ && sm->t_state.range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
-    ink_assert(sm->t_state.num_range_fields == 1); // we current just support only one range entry
-    read_start_pos = sm->t_state.ranges[0]._start;
-    producer_n = (sm->t_state.ranges[0]._end - sm->t_state.ranges[0]._start) + 1;
-    consumer_n = (producer_n + sm->client_response_hdr_bytes);
-  } else if (p->nbytes >= 0) {
+  if (p->nbytes >= 0) {
     consumer_n = p->nbytes;
     producer_n = p->ntodo;
   } else {
@@ -988,11 +982,7 @@ HttpTunnel::producer_run(HttpTunnelProducer *p)
       Debug("http_tunnel", "[%" PRId64 "] [tunnel_run] producer already done", sm->sm_id);
       producer_handler(HTTP_TUNNEL_EVENT_PRECOMPLETE, p);
     } else {
-      if (read_start_pos > 0) {
-        p->read_vio = ((CacheVC *)p->vc)->do_io_pread(this, producer_n, p->read_buffer, read_start_pos);
-      } else {
-        p->read_vio = p->vc->do_io_read(this, producer_n, p->read_buffer);
-      }
+      p->read_vio = p->vc->do_io_read(this, producer_n, p->read_buffer);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HPACK.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/HPACK.cc b/proxy/http2/HPACK.cc
index b074898..1b3a83c 100644
--- a/proxy/http2/HPACK.cc
+++ b/proxy/http2/HPACK.cc
@@ -241,8 +241,10 @@ Http2DynamicTable::add_header_field(const MIMEField *field)
   uint32_t header_size = ADDITIONAL_OCTETS + name_len + value_len;
 
   if (header_size > _settings_dynamic_table_size) {
-    // 5.3. It is not an error to attempt to add an entry that is larger than the maximum size; an
-    // attempt to add an entry larger than the entire table causes the table to be emptied of all existing entries.
+    // 5.3. It is not an error to attempt to add an entry that is larger than
+    // the maximum size; an
+    // attempt to add an entry larger than the entire table causes the table to
+    // be emptied of all existing entries.
     _headers.clear();
     _mhdr->fields_clear();
   } else {
@@ -602,7 +604,8 @@ decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start,
   HpackFieldType ftype = hpack_parse_field_type(*p);
 
   if (ftype == HPACK_FIELD_INDEXED_LITERAL) {
-    // 7.2.1. index extraction based on Literal Header Field with Incremental Indexing
+    // 7.2.1. index extraction based on Literal Header Field with Incremental
+    // Indexing
     len = decode_integer(index, p, buf_end, 6);
     isIncremental = true;
   } else if (ftype == HPACK_FIELD_NEVERINDEX_LITERAL) {
@@ -654,7 +657,6 @@ decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start,
   p += len;
   header.value_set(value_str, value_str_len);
 
-
   // Incremental Indexing adds header to header table as new entry
   if (isIncremental) {
     dynamic_table.add_header_field(header.field_get());
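
The size rule cited in the add_header_field() hunk above is the HPACK entry
rule: an entry whose size (name length + value length + 32 octets of overhead)
exceeds the table's maximum is not an error, it simply empties the table. A
generic, self-contained sketch of that rule (not the Http2DynamicTable
implementation):

#include <cstdint>
#include <deque>
#include <string>

struct Entry {
  std::string name, value;
};

struct DynamicTableSketch {
  std::deque<Entry> entries; // front = newest, back = oldest
  uint32_t current_size = 0;
  uint32_t max_size = 4096;  // SETTINGS_HEADER_TABLE_SIZE

  static uint32_t entry_size(const Entry &e)
  {
    return static_cast<uint32_t>(e.name.size() + e.value.size()) + 32;
  }

  void add(const Entry &e)
  {
    uint32_t sz = entry_size(e);
    if (sz > max_size) { // oversize entry: flush the whole table
      entries.clear();
      current_size = 0;
      return;
    }
    while (current_size + sz > max_size) { // evict oldest entries to make room
      current_size -= entry_size(entries.back());
      entries.pop_back();
    }
    entries.push_front(e);
    current_size += sz;
  }
};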

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HPACK.h
----------------------------------------------------------------------
diff --git a/proxy/http2/HPACK.h b/proxy/http2/HPACK.h
index 4e63a37..a385e93 100644
--- a/proxy/http2/HPACK.h
+++ b/proxy/http2/HPACK.h
@@ -47,9 +47,12 @@ const static int HPACK_ERROR_HTTP2_PROTOCOL_ERROR = -2;
 
 enum HpackFieldType {
   HPACK_FIELD_INDEX,              // HPACK 7.1 Indexed Header Field Representation
-  HPACK_FIELD_INDEXED_LITERAL,    // HPACK 7.2.1 Literal Header Field with Incremental Indexing
-  HPACK_FIELD_NOINDEX_LITERAL,    // HPACK 7.2.2 Literal Header Field without Indexing
-  HPACK_FIELD_NEVERINDEX_LITERAL, // HPACK 7.2.3 Literal Header Field never Indexed
+  HPACK_FIELD_INDEXED_LITERAL,    // HPACK 7.2.1 Literal Header Field with
+                                  // Incremental Indexing
+  HPACK_FIELD_NOINDEX_LITERAL,    // HPACK 7.2.2 Literal Header Field without
+                                  // Indexing
+  HPACK_FIELD_NEVERINDEX_LITERAL, // HPACK 7.2.3 Literal Header Field never
+                                  // Indexed
   HPACK_FIELD_TABLESIZE_UPDATE,   // HPACK 7.3 Header Table Size Update
 };
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HTTP2.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/HTTP2.cc b/proxy/http2/HTTP2.cc
index 5a6637b..6e3b711 100644
--- a/proxy/http2/HTTP2.cc
+++ b/proxy/http2/HTTP2.cc
@@ -102,7 +102,8 @@ http2_are_frame_flags_valid(uint8_t ftype, uint8_t fflags)
     HTTP2_FLAGS_WINDOW_UPDATE_MASK, HTTP2_FLAGS_CONTINUATION_MASK,
   };
 
-  // The frame flags are valid for this frame if nothing outside the defined bits is set.
+  // The frame flags are valid for this frame if nothing outside the defined
+  // bits is set.
   return (fflags & ~mask[ftype]) == 0;
 }
 
@@ -129,8 +130,7 @@ http2_settings_parameter_is_valid(const Http2SettingsParameter &param)
 {
   // Static maximum values for Settings parameters.
   static const uint32_t settings_max[HTTP2_SETTINGS_MAX] = {
-    0,
-    UINT_MAX,              // HTTP2_SETTINGS_HEADER_TABLE_SIZE
+    0, UINT_MAX,           // HTTP2_SETTINGS_HEADER_TABLE_SIZE
     1,                     // HTTP2_SETTINGS_ENABLE_PUSH
     UINT_MAX,              // HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS
     HTTP2_MAX_WINDOW_SIZE, // HTTP2_SETTINGS_INITIAL_WINDOW_SIZE
@@ -316,7 +316,6 @@ http2_parse_headers_parameter(IOVec iov, Http2HeadersParameter &params)
   return true;
 }
 
-
 // 6.3.  PRIORITY
 //
 // 0                   1                   2                   3
@@ -392,7 +391,6 @@ http2_parse_settings_parameter(IOVec iov, Http2SettingsParameter &param)
   return true;
 }
 
-
 // 6.8.  GOAWAY
 //
 // 0                   1                   2                   3
@@ -420,7 +418,6 @@ http2_parse_goaway(IOVec iov, Http2Goaway &goaway)
   return true;
 }
 
-
 // 6.9.  WINDOW_UPDATE
 //
 // 0                   1                   2                   3
@@ -587,8 +584,10 @@ http2_write_header_fragment(HTTPHdr *in, MIMEFieldIter &field_iter, uint8_t *out
   ink_assert(http_hdr_type_get(in->m_http) != HTTP_TYPE_UNKNOWN);
   ink_assert(in);
 
-  // TODO Get a index value from the tables for the header field, and then choose a representation type.
-  // TODO Each indexing types per field should be passed by a caller, HTTP/2 implementation.
+  // TODO Get an index value from the tables for the header field, and then
+  // choose a representation type.
+  // TODO Each indexing type per field should be passed by the caller (the
+  // HTTP/2 implementation).
 
   // Get first header field which is required encoding
   MIMEField *field;
@@ -766,7 +765,6 @@ Http2::init()
   REC_EstablishStaticConfigInt32U(max_header_list_size, "proxy.config.http2.max_header_list_size");
 }
 
-
 #if TS_HAS_TESTS
 
 #include "TestBox.h"
@@ -777,10 +775,11 @@ const static int MAX_TEST_FIELD_NUM = 8;
 
 /***********************************************************************************
  *                                                                                 *
- *                   Test cases for regression test                                *
+ *                   Test cases for regression test *
  *                                                                                 *
- * Some test cases are based on examples of specification.                         *
- * http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09#appendix-D  *
+ * Some test cases are based on examples of specification. *
+ * http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09#appendix-D
+ **
  *                                                                                 *
  ***********************************************************************************/
 
@@ -887,7 +886,7 @@ const static struct {
 
 /***********************************************************************************
  *                                                                                 *
- *                                Regression test codes                            *
+ *                                Regression test codes *
  *                                                                                 *
  ***********************************************************************************/
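
The http2_are_frame_flags_valid() hunk near the top of this file reduces flag
validation to a per-frame-type mask test. A self-contained sketch of that test
(the mask values mentioned are illustrative, not the full ATS table):

#include <cstddef>
#include <cstdint>

// A frame's flags are valid when no bit outside the mask defined for its type
// is set; unknown frame types are rejected outright in this sketch.
static bool
frame_flags_valid_sketch(uint8_t ftype, uint8_t fflags, const uint8_t *mask, size_t mask_len)
{
  if (ftype >= mask_len)
    return false;
  return (fflags & ~mask[ftype]) == 0;
}

// Example: with mask[HTTP2_FRAME_TYPE_DATA] = 0x2B, flags 0x01 (END_STREAM)
// pass the test, while an undefined bit such as 0x40 fails it.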
 

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/HTTP2.h
----------------------------------------------------------------------
diff --git a/proxy/http2/HTTP2.h b/proxy/http2/HTTP2.h
index bbeffd3..fe976de 100644
--- a/proxy/http2/HTTP2.h
+++ b/proxy/http2/HTTP2.h
@@ -33,7 +33,8 @@ class HTTPHdr;
 
 typedef unsigned Http2StreamId;
 
-// 6.9.2 Initial Flow Control Window Size - the flow control window can be come negative
+// 6.9.2 Initial Flow Control Window Size - the flow control window can become
+// negative
 // so we need to track it with a signed type.
 typedef int32_t Http2WindowSize;
 
@@ -77,7 +78,6 @@ enum Http2ErrorCode {
   HTTP2_ERROR_ENHANCE_YOUR_CALM = 11,
   HTTP2_ERROR_INADEQUATE_SECURITY = 12,
   HTTP2_ERROR_HTTP_1_1_REQUIRED = 13,
-
   HTTP2_ERROR_MAX,
 };
 
@@ -103,7 +103,6 @@ enum Http2FrameType {
   HTTP2_FRAME_TYPE_GOAWAY = 7,
   HTTP2_FRAME_TYPE_WINDOW_UPDATE = 8,
   HTTP2_FRAME_TYPE_CONTINUATION = 9,
-
   HTTP2_FRAME_TYPE_MAX,
 };
 
@@ -111,7 +110,6 @@ enum Http2FrameType {
 enum Http2FrameFlagsData {
   HTTP2_FLAGS_DATA_END_STREAM = 0x01,
   HTTP2_FLAGS_DATA_PADDED = 0x08,
-
   HTTP2_FLAGS_DATA_MASK = 0x2B,
 };
 
@@ -121,7 +119,6 @@ enum Http2FrameFlagsHeaders {
   HTTP2_FLAGS_HEADERS_END_HEADERS = 0x04,
   HTTP2_FLAGS_HEADERS_PADDED = 0x08,
   HTTP2_FLAGS_HEADERS_PRIORITY = 0x20,
-
   HTTP2_FLAGS_HEADERS_MASK = 0x2B,
 };
 
@@ -136,27 +133,18 @@ enum Http2FrameFlagsRstStream {
 };
 
 // 6.4 Settings
-enum Http2FrameFlagsSettings {
-  HTTP2_FLAGS_SETTINGS_ACK = 0x01,
-
-  HTTP2_FLAGS_SETTINGS_MASK = 0x01
-};
+enum Http2FrameFlagsSettings { HTTP2_FLAGS_SETTINGS_ACK = 0x01, HTTP2_FLAGS_SETTINGS_MASK = 0x01 };
 
 // 6.6 Push Promise
 enum Http2FrameFlagsPushPromise {
   HTTP2_FLAGS_PUSH_PROMISE_END_HEADERS = 0x04,
   HTTP2_FLAGS_PUSH_PROMISE_PAD_LOW = 0x08,
   HTTP2_FLAGS_PUSH_PROMISE_PAD_HIGH = 0x10,
-
   HTTP2_FLAGS_PUSH_PROMISE_MASK = 0x1C,
 };
 
 // 6.7 Ping
-enum Http2FrameFlagsPing {
-  HTTP2_FLAGS_PING_ACK = 0x01,
-
-  HTTP2_FLAGS_PING_MASK = 0x01
-};
+enum Http2FrameFlagsPing { HTTP2_FLAGS_PING_ACK = 0x01, HTTP2_FLAGS_PING_MASK = 0x01 };
 
 // 6.8 Goaway
 enum Http2FrameFlagsGoaway {
@@ -173,7 +161,6 @@ enum Http2FrameFlagsContinuation {
   HTTP2_FLAGS_CONTINUATION_END_HEADERS = 0x04,
   HTTP2_FLAGS_CONTINUATION_PAD_LOW = 0x08,
   HTTP2_FLAGS_CONTINUATION_PAD_HIGH = 0x10,
-
   HTTP2_FLAGS_CONTINUATION_MASK = 0x1C,
 };
 
@@ -185,7 +172,6 @@ enum Http2SettingsIdentifier {
   HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 4,
   HTTP2_SETTINGS_MAX_FRAME_SIZE = 5,
   HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 6,
-
   HTTP2_SETTINGS_MAX
 };
 
@@ -222,8 +208,10 @@ struct Http2Goaway {
   Http2StreamId last_streamid;
   uint32_t error_code;
 
-  // NOTE: we don't (de)serialize the variable length debug data at this layer because there's
-  // really nothing we can do with it without some out of band agreement. Trying to deal with it
+  // NOTE: we don't (de)serialize the variable length debug data at this layer
+  // because there's
+  // really nothing we can do with it without some out of band agreement. Trying
+  // to deal with it
   // just complicates memory management.
 };
 
@@ -286,9 +274,10 @@ int64_t http2_write_psuedo_headers(HTTPHdr *, uint8_t *, uint64_t, Http2DynamicT
 
 int64_t http2_write_header_fragment(HTTPHdr *, MIMEFieldIter &, uint8_t *, uint64_t, Http2DynamicTable &, bool &);
 
-
-// Not sure where else to put this, but figure this is as good of a start as anything else.
-// Right now, only the static init() is available, which sets up some basic librecords
+// Not sure where else to put this, but figure this is as good of a start as
+// anything else.
+// Right now, only the static init() is available, which sets up some basic
+// librecords
 // dependencies.
 class Http2
 {
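
The signed Http2WindowSize typedef earlier in this header exists because, per
RFC 7540 section 6.9.2, lowering SETTINGS_INITIAL_WINDOW_SIZE shifts every open
stream's window by the delta and can push it below zero. A small sketch of that
adjustment (the function name is illustrative):

#include <cstdint>

typedef int32_t Http2WindowSize; // signed, as declared above

// Apply a change of the peer's initial window size to a stream's current
// window. The result may be negative; the stream must then wait for
// WINDOW_UPDATE frames before it can send more data.
static Http2WindowSize
apply_initial_window_change(Http2WindowSize current, uint32_t old_initial, uint32_t new_initial)
{
  int64_t delta = static_cast<int64_t>(new_initial) - static_cast<int64_t>(old_initial);
  return static_cast<Http2WindowSize>(current + delta);
}

// Example: current = 100, old_initial = 65535, new_initial = 16384
//   -> new window = 100 - 49151 = -49051, representable only with a signed type.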

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ClientSession.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ClientSession.cc b/proxy/http2/Http2ClientSession.cc
index 40c4a50..87c9204 100644
--- a/proxy/http2/Http2ClientSession.cc
+++ b/proxy/http2/Http2ClientSession.cc
@@ -41,7 +41,8 @@
 
 ClassAllocator<Http2ClientSession> http2ClientSessionAllocator("http2ClientSessionAllocator");
 
-// memcpy the requested bytes from the IOBufferReader, returning how many were actually copied.
+// memcpy the requested bytes from the IOBufferReader, returning how many were
+// actually copied.
 static inline unsigned
 copy_from_buffer_reader(void *dst, IOBufferReader *reader, unsigned nbytes)
 {
@@ -94,7 +95,8 @@ Http2ClientSession::start()
   // 3.5 HTTP/2 Connection Preface. Upon establishment of a TCP connection and
   // determination that HTTP/2 will be used by both peers, each endpoint MUST
   // send a connection preface as a final confirmation ...
-  // this->write_buffer->write(HTTP2_CONNECTION_PREFACE, HTTP2_CONNECTION_PREFACE_LEN);
+  // this->write_buffer->write(HTTP2_CONNECTION_PREFACE,
+  // HTTP2_CONNECTION_PREFACE_LEN);
 
   this->connection_state.init();
   send_connection_event(&this->connection_state, HTTP2_SESSION_EVENT_INIT, this);
@@ -145,7 +147,8 @@ Http2ClientSession::set_upgrade_context(HTTPHdr *h)
       Http2SettingsParameter param;
       if (!http2_parse_settings_parameter(make_iovec(out_buf + nbytes, HTTP2_SETTINGS_PARAMETER_LEN), param) ||
           !http2_settings_parameter_is_valid(param)) {
-        // TODO ignore incoming invalid parameters and send suitable SETTINGS frame.
+        // TODO ignore incoming invalid parameters and send suitable SETTINGS
+        // frame.
       }
       upgrade_context.client_settings.set((Http2SettingsIdentifier)param.id, param.value);
     }
@@ -181,7 +184,8 @@ Http2ClientSession::do_io_shutdown(ShutdownHowTo_t howto)
   this->client_vc->do_io_shutdown(howto);
 }
 
-// XXX Currently, we don't have a half-closed state, but we will need to implement that. After we send a GOAWAY, there
+// XXX Currently, we don't have a half-closed state, but we will need to
+// implement that. After we send a GOAWAY, there
 // are scenarios where we would like to complete the outstanding streams.
 
 void
@@ -282,8 +286,10 @@ Http2ClientSession::state_read_connection_preface(int event, void *edata)
     }
   }
 
-  // XXX We don't have enough data to check the connection preface. We should reset the accept inactivity
-  // timeout. We should have a maximum timeout to get the session started though.
+  // XXX We don't have enough data to check the connection preface. We should
+  // reset the accept inactivity
+  // timeout. We should have a maximum timeout to get the session started
+  // though.
 
   vio->reenable();
   return 0;
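
copy_from_buffer_reader() above is described only by its one-line comment in
this hunk. One plausible shape for such a helper, assuming the usual
IOBufferReader block-walking calls (start(), block_read_avail(), consume());
this is a sketch, not the patch's implementation:

#include <algorithm>
#include <cstring>

// Copy up to nbytes from the reader into dst, walking the underlying buffer
// blocks, and return how many bytes were actually available and copied.
static inline unsigned
copy_from_buffer_reader_sketch(void *dst, IOBufferReader *reader, unsigned nbytes)
{
  char *out = static_cast<char *>(dst);
  unsigned copied = 0;
  while (copied < nbytes) {
    int64_t avail = reader->block_read_avail(); // contiguous bytes in the current block
    if (avail <= 0)
      break;
    int64_t n = std::min<int64_t>(avail, static_cast<int64_t>(nbytes - copied));
    memcpy(out + copied, reader->start(), n);
    reader->consume(n);
    copied += static_cast<unsigned>(n);
  }
  return copied;
}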

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ClientSession.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ClientSession.h b/proxy/http2/Http2ClientSession.h
index 5e3ab23..3d41821 100644
--- a/proxy/http2/Http2ClientSession.h
+++ b/proxy/http2/Http2ClientSession.h
@@ -87,7 +87,8 @@ public:
     return this->hdr.cooked;
   }
 
-  // Allocate an IOBufferBlock for this frame. This switches us from using the in-line header
+  // Allocate an IOBufferBlock for this frame. This switches us from using the
+  // in-line header
   // buffer, to an external buffer block.
   void
   alloc(int index)
@@ -198,7 +199,6 @@ public:
     return upgrade_context;
   }
 
-
 private:
   Http2ClientSession(Http2ClientSession &);                  // noncopyable
   Http2ClientSession &operator=(const Http2ClientSession &); // noncopyable

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2ConnectionState.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2ConnectionState.h b/proxy/http2/Http2ConnectionState.h
index 61526f0..348206a 100644
--- a/proxy/http2/Http2ConnectionState.h
+++ b/proxy/http2/Http2ConnectionState.h
@@ -35,7 +35,8 @@ class Http2ConnectionSettings
 public:
   Http2ConnectionSettings()
   {
-    // 6.5.2.  Defined SETTINGS Parameters. These should generally not be modified,
+    // 6.5.2.  Defined SETTINGS Parameters. These should generally not be
+    // modified,
     // only if the protocol changes should these change.
     settings[indexof(HTTP2_SETTINGS_ENABLE_PUSH)] = 0; // Disabled for now
 
@@ -180,10 +181,10 @@ private:
   uint64_t data_length;
 };
 
-
 // Http2ConnectionState
 //
-// Capture the semantics of a HTTP/2 connection. The client session captures the frame layer, and the
+// Capture the semantics of a HTTP/2 connection. The client session captures the
+// frame layer, and the
 // connection state captures the connection-wide state.
 
 class Http2ConnectionState : public Continuation
@@ -213,7 +214,8 @@ public:
     continued_buffer.iov_base = NULL;
     continued_buffer.iov_len = 0;
 
-    // Load the server settings from the records.config / RecordsConfig.cc settings.
+    // Load the server settings from the records.config / RecordsConfig.cc
+    // settings.
     server_settings.settings_from_configs();
   }
 
@@ -282,7 +284,8 @@ private:
   // Counter for currently active streams started by the client
   uint32_t client_streams_count;
 
-  // The buffer used for storing incomplete fragments of a header field which consists of multiple frames.
+  // The buffer used for storing incomplete fragments of a header field which
+  // consists of multiple frames.
   Http2StreamId continued_id;
   IOVec continued_buffer;
 };

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2SessionAccept.cc
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2SessionAccept.cc b/proxy/http2/Http2SessionAccept.cc
index fbb25db..5651e5b 100644
--- a/proxy/http2/Http2SessionAccept.cc
+++ b/proxy/http2/Http2SessionAccept.cc
@@ -38,7 +38,8 @@ Http2SessionAccept::~Http2SessionAccept()
 void
 Http2SessionAccept::accept(NetVConnection *netvc, MIOBuffer *iobuf, IOBufferReader *reader)
 {
-  // XXX we need to refactor the ACL checks from HttpSessionAccept so that we can invoke them here, and also in
+  // XXX we need to refactor the ACL checks from HttpSessionAccept so that we
+  // can invoke them here, and also in
   // the SPDY protocol layer ...
   // Warning("skipping access control checks for HTTP/2 connection");
 
@@ -48,8 +49,9 @@ Http2SessionAccept::accept(NetVConnection *netvc, MIOBuffer *iobuf, IOBufferRead
     const sockaddr *client_ip = netvc->get_remote_addr();
     ip_port_text_buffer ipb;
 
-    Debug("http2_seq", "[HttpSessionAccept2:mainEvent %p] accepted connection from %s transport type = %d", netvc,
-          ats_ip_nptop(client_ip, ipb, sizeof(ipb)), netvc->attributes);
+    Debug("http2_seq", "[HttpSessionAccept2:mainEvent %p] accepted connection "
+                       "from %s transport type = %d",
+          netvc, ats_ip_nptop(client_ip, ipb, sizeof(ipb)), netvc->attributes);
   }
 
   // XXX Allocate a Http2ClientSession
@@ -69,7 +71,8 @@ Http2SessionAccept::mainEvent(int event, void *data)
     return EVENT_CONT;
   }
 
-  // XXX We should hoist the error handling so that all the protocols generate the statistics
+  // XXX We should hoist the error handling so that all the protocols generate
+  // the statistics
   // without code duplication.
   if (((long)data) == -ECONNABORTED) {
     HTTP_SUM_DYN_STAT(http_ua_msecs_counts_errors_pre_accept_hangups_stat, 0);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/http2/Http2SessionAccept.h
----------------------------------------------------------------------
diff --git a/proxy/http2/Http2SessionAccept.h b/proxy/http2/Http2SessionAccept.h
index 6d6fce0..e4f219b 100644
--- a/proxy/http2/Http2SessionAccept.h
+++ b/proxy/http2/Http2SessionAccept.h
@@ -27,17 +27,21 @@
 #include "libts.h"
 #include "I_Net.h"
 
-// XXX HttpSessionAccept::Options needs to be refactored and separated from HttpSessionAccept so that
+// XXX HttpSessionAccept::Options needs to be refactored and separated from
+// HttpSessionAccept so that
 // it can generically apply to all protocol implementations.
 #include "http/HttpSessionAccept.h"
 
 // HTTP/2 Session Accept.
 //
-// HTTP/2 needs to be explicitly enabled on a server port. The syntax is different for SSL and raw
-// ports. There's currently no support for the HTTP/1.1 upgrade path. The example below configures
+// HTTP/2 needs to be explicitly enabled on a server port. The syntax is
+// different for SSL and raw
+// ports. There's currently no support for the HTTP/1.1 upgrade path. The
+// example below configures
 // HTTP/2 on port 80 and port 443 (with TLS).
 //
-// CONFIG proxy.config.http.server_ports STRING 80:proto=http2 443:ssl:proto=h2-12
+// CONFIG proxy.config.http.server_ports STRING 80:proto=http2
+// 443:ssl:proto=h2-12
 
 struct Http2SessionAccept : public SessionAccept {
   explicit Http2SessionAccept(const HttpSessionAccept::Options &);

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/Log.cc
----------------------------------------------------------------------
diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc
index 332131b..84149c4 100644
--- a/proxy/logging/Log.cc
+++ b/proxy/logging/Log.cc
@@ -130,9 +130,12 @@ Log::change_configuration()
   ink_mutex_release(prev->log_object_manager._APImutex);
   Debug("log-api-mutex", "Log::change_configuration released api mutex");
 
-  // Register the new config in the config processor; the old one will now be scheduled for a
-  // future deletion. We don't need to do anything magical with refcounts, since the
-  // configProcessor will keep a reference count, and drop it when the deletion is scheduled.
+  // Register the new config in the config processor; the old one will now be
+  // scheduled for a
+  // future deletion. We don't need to do anything magical with refcounts, since
+  // the
+  // configProcessor will keep a reference count, and drop it when the deletion
+  // is scheduled.
   configProcessor.set(log_configid, new_config);
 
   // If we replaced the logging configuration, flush any log
@@ -516,11 +519,12 @@ Log::init_fields()
     SQUID_LOG_ERR_FUTURE_1, "ERR_FUTURE_1", SQUID_LOG_ERR_UNKNOWN, "ERR_UNKNOWN");
 
   Ptr<LogFieldAliasTable> cache_hit_miss_map = make_ptr(new LogFieldAliasTable);
-  cache_hit_miss_map->init(23, SQUID_HIT_RESERVED, "HIT", SQUID_HIT_LEVEL_1, "HIT_RAM", // Also SQUID_HIT_RAM
-                           SQUID_HIT_LEVEL_2, "HIT_SSD",                                // Also SQUID_HIT_SSD
-                           SQUID_HIT_LEVEL_3, "HIT_DISK",                               // Also SQUID_HIT_DISK
-                           SQUID_HIT_LEVEL_4, "HIT_CLUSTER",                            // Also SQUID_HIT_CLUSTER
-                           SQUID_HIT_LEVEL_5, "HIT_NET",                                // Also SQUID_HIT_NET
+  cache_hit_miss_map->init(23, SQUID_HIT_RESERVED, "HIT", SQUID_HIT_LEVEL_1,
+                           "HIT_RAM",                        // Also SQUID_HIT_RAM
+                           SQUID_HIT_LEVEL_2, "HIT_SSD",     // Also SQUID_HIT_SSD
+                           SQUID_HIT_LEVEL_3, "HIT_DISK",    // Also SQUID_HIT_DISK
+                           SQUID_HIT_LEVEL_4, "HIT_CLUSTER", // Also SQUID_HIT_CLUSTER
+                           SQUID_HIT_LEVEL_5, "HIT_NET",     // Also SQUID_HIT_NET
                            SQUID_HIT_LEVEL_6, "HIT_LEVEL_6", SQUID_HIT_LEVEL_7, "HIT_LEVEL_7", SQUID_HIT_LEVEL_8, "HIT_LEVEL_8",
                            SQUID_HIT_LEVEl_9, "HIT_LEVEL_9", SQUID_MISS_NONE, "MISS", SQUID_MISS_ICP_AUTH, "MISS_ICP_AUTH",
                            SQUID_MISS_HTTP_NON_CACHE, "MISS_HTTP_NON_CACHE", SQUID_MISS_ICP_STOPLIST, "MISS_ICP_STOPLIST",
@@ -1098,8 +1102,9 @@ Log::flush_thread_main(void * /* args ATS_UNUSED */)
       //
       while (total_bytes - bytes_written) {
         if (Log::config->logging_space_exhausted) {
-          Debug("log", "logging space exhausted, failed to write file:%s, have dropped (%d) bytes.", logfile->get_name(),
-                (total_bytes - bytes_written));
+          Debug("log", "logging space exhausted, failed to write file:%s, have "
+                       "dropped (%d) bytes.",
+                logfile->get_name(), (total_bytes - bytes_written));
 
           RecIncrRawStat(log_rsb, mutex->thread_holding, log_stat_bytes_lost_before_written_to_disk_stat,
                          total_bytes - bytes_written);
@@ -1232,7 +1237,9 @@ Log::collate_thread_main(void * /* args ATS_UNUSED */)
       }
 
       if (header->version != LOG_SEGMENT_VERSION) {
-        Note("Invalid LogBuffer received; invalid version - buffer = %u, current = %u", header->version, LOG_SEGMENT_VERSION);
+        Note("Invalid LogBuffer received; invalid version - buffer = %u, "
+             "current = %u",
+             header->version, LOG_SEGMENT_VERSION);
         delete[] header;
         continue;
       }

http://git-wip-us.apache.org/repos/asf/trafficserver/blob/1c06db83/proxy/logging/Log.h
----------------------------------------------------------------------
diff --git a/proxy/logging/Log.h b/proxy/logging/Log.h
index fb194c9..8a26b8d 100644
--- a/proxy/logging/Log.h
+++ b/proxy/logging/Log.h
@@ -63,7 +63,8 @@
   o Initial State
 
       - A LogBufferPool is allocated, with storage equal to
-        sizeof (LogBufferPoolHeader) + buffer_segment_count * buffer_segment_size
+        sizeof (LogBufferPoolHeader) + buffer_segment_count *
+  buffer_segment_size
 
       - The buffer pool space is divided into buffer_segment_count
         segments, each with a fixed size of buffer_segment_size.
@@ -467,7 +468,6 @@ private:
   Log &operator=(const Log &rhs);
 };
 
-
 static inline bool
 LogRollingEnabledIsValid(int enabled)
 {