You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@trafficserver.apache.org by zw...@apache.org on 2020/06/23 19:58:01 UTC
[trafficserver] 02/02: Disable max_connections_active_in default
now that feature works (#6903)
This is an automated email from the ASF dual-hosted git repository.
zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
commit c02d4804f4dc226164a8b391eedd79a0f71e4468
Author: Susan Hinrichs <sh...@yahoo-inc.com>
AuthorDate: Mon Jun 22 17:54:13 2020 -0500
Disable max_connections_active_in default now that feature works (#6903)
Co-authored-by: Susan Hinrichs <sh...@apache.org>
(cherry picked from commit 54cff079598f67e01a80a3badb9f7c772562df74)
---
iocore/net/P_UnixNet.h | 2 +-
iocore/net/UnixNet.cc | 11 ++++++++---
mgmt/RecordsConfig.cc | 2 +-
proxy/http2/Http2ClientSession.cc | 1 +
tests/gold_tests/h2/h2active_timeout.py | 29 +++++++++++++++++------------
tests/gold_tests/h2/http2.test.py | 2 +-
6 files changed, 29 insertions(+), 18 deletions(-)
diff --git a/iocore/net/P_UnixNet.h b/iocore/net/P_UnixNet.h
index da1057d..82c7884 100644
--- a/iocore/net/P_UnixNet.h
+++ b/iocore/net/P_UnixNet.h
@@ -297,7 +297,7 @@ public:
void process_enabled_list();
void process_ready_list();
void manage_keep_alive_queue();
- bool manage_active_queue(bool ignore_queue_size);
+ bool manage_active_queue(NetEvent *ne, bool ignore_queue_size);
void add_to_keep_alive_queue(NetEvent *ne);
void remove_from_keep_alive_queue(NetEvent *ne);
bool add_to_active_queue(NetEvent *ne);
diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc
index 653b675..f368e07 100644
--- a/iocore/net/UnixNet.cc
+++ b/iocore/net/UnixNet.cc
@@ -111,7 +111,7 @@ public:
// Therefore we don't have to check all the NetEvents as much as open_list.
// Cleanup the active and keep-alive queues periodically
- nh.manage_active_queue(true); // close any connections over the active timeout
+ nh.manage_active_queue(nullptr, true); // close any connections over the active timeout
nh.manage_keep_alive_queue();
return 0;
@@ -565,7 +565,7 @@ NetHandler::signalActivity()
}
bool
-NetHandler::manage_active_queue(bool ignore_queue_size = false)
+NetHandler::manage_active_queue(NetEvent *enabling_ne, bool ignore_queue_size = false)
{
const int total_connections_in = active_queue_size + keep_alive_queue_size;
Debug("v_net_queue",
@@ -594,6 +594,11 @@ NetHandler::manage_active_queue(bool ignore_queue_size = false)
int total_idle_count = 0;
for (; ne != nullptr; ne = ne_next) {
ne_next = ne->active_queue_link.next;
+ // It seems dangerous closing the current ne at this point
+ // Let the activity_cop deal with it
+ if (ne == enabling_ne) {
+ continue;
+ }
if ((ne->inactivity_timeout_in && ne->next_inactivity_timeout_at <= now) ||
(ne->active_timeout_in && ne->next_activity_timeout_at <= now)) {
_close_ne(ne, now, handle_event, closed, total_idle_time, total_idle_count);
@@ -741,7 +746,7 @@ NetHandler::add_to_active_queue(NetEvent *ne)
bool active_queue_full = false;
// if active queue is over size then close inactive connections
- if (manage_active_queue() == false) {
+ if (manage_active_queue(ne) == false) {
active_queue_full = true;
}
diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc
index a70200d..2f293bc 100644
--- a/mgmt/RecordsConfig.cc
+++ b/mgmt/RecordsConfig.cc
@@ -400,7 +400,7 @@ static const RecordElement RecordsConfig[] =
,
{RECT_CONFIG, "proxy.config.net.max_connections_in", RECD_INT, "30000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
,
- {RECT_CONFIG, "proxy.config.net.max_connections_active_in", RECD_INT, "10000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
+ {RECT_CONFIG, "proxy.config.net.max_connections_active_in", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL}
,
// ###########################
diff --git a/proxy/http2/Http2ClientSession.cc b/proxy/http2/Http2ClientSession.cc
index 638679f..8045883 100644
--- a/proxy/http2/Http2ClientSession.cc
+++ b/proxy/http2/Http2ClientSession.cc
@@ -346,6 +346,7 @@ Http2ClientSession::main_event_handler(int event, void *edata)
case VC_EVENT_INACTIVITY_TIMEOUT:
case VC_EVENT_ERROR:
case VC_EVENT_EOS:
+ Http2SsnDebug("Closing event %d", event);
this->set_dying_event(event);
this->do_io_close();
if (_vc != nullptr) {
diff --git a/tests/gold_tests/h2/h2active_timeout.py b/tests/gold_tests/h2/h2active_timeout.py
index aa79cb1..d2f47f2 100644
--- a/tests/gold_tests/h2/h2active_timeout.py
+++ b/tests/gold_tests/h2/h2active_timeout.py
@@ -22,23 +22,25 @@ import argparse
import time
-def makerequest(port):
+def makerequest(port, active_timeout):
hyper.tls._context = hyper.tls.init_context()
hyper.tls._context.check_hostname = False
hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE
conn = HTTPConnection('localhost:{0}'.format(port), secure=True)
- active_timeout = 3
- request_interval = 0.1
- loop_cnt = int((active_timeout + 2) / request_interval)
- for _ in range(loop_cnt):
- try:
- conn.request('GET', '/')
- time.sleep(request_interval)
- except:
- print('CONNECTION_TIMEOUT')
- return
+ try:
+ # delay after sending the first request
+ # so the H2 session active timeout triggers
+ # Then the next request should fail
+ req_id = conn.request('GET', '/')
+ time.sleep(active_timeout)
+ response = conn.get_response(req_id)
+ req_id = conn.request('GET', '/')
+ response = conn.get_response(req_id)
+ except:
+ print('CONNECTION_TIMEOUT')
+ return
print('NO_TIMEOUT')
@@ -48,8 +50,11 @@ def main():
parser.add_argument("--port", "-p",
type=int,
help="Port to use")
+ parser.add_argument("--delay", "-d",
+ type=int,
+ help="Time to delay in seconds")
args = parser.parse_args()
- makerequest(args.port)
+ makerequest(args.port, args.delay)
if __name__ == '__main__':
diff --git a/tests/gold_tests/h2/http2.test.py b/tests/gold_tests/h2/http2.test.py
index 2ee5ce5..d75a9d4 100644
--- a/tests/gold_tests/h2/http2.test.py
+++ b/tests/gold_tests/h2/http2.test.py
@@ -152,7 +152,7 @@ tr.StillRunningAfter = server
# Test Case 5: h2_active_timeout
tr = Test.AddTestRun()
-tr.Processes.Default.Command = 'python3 h2active_timeout.py -p {0}'.format(ts.Variables.ssl_port)
+tr.Processes.Default.Command = 'python3 h2active_timeout.py -p {0} -d 4'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = "gold/active_timeout.gold"
tr.StillRunningAfter = server