You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@trafficserver.apache.org by zw...@apache.org on 2019/11/08 21:08:55 UTC
[trafficserver] branch 9.0.x updated: thread ready check
This is an automated email from the ASF dual-hosted git repository.
zwoop pushed a commit to branch 9.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/9.0.x by this push:
new 25db8be thread ready check
25db8be is described below
commit 25db8be84f4be674fead009f32da63b19112f121
Author: Fei Deng <du...@gmail.com>
AuthorDate: Mon Oct 28 10:15:26 2019 -0500
thread ready check
(cherry picked from commit 3ceab9e3d0426000ec1ec3c34decdb375906f073)
---
iocore/eventsystem/I_EventProcessor.h | 4 ++++
iocore/eventsystem/UnixEventProcessor.cc | 27 +++++++++++++++++++++++----
proxy/http/HttpProxyServerMain.cc | 4 ++--
proxy/http/HttpProxyServerMain.h | 2 +-
src/traffic_server/traffic_server.cc | 12 +++++++++---
5 files changed, 39 insertions(+), 10 deletions(-)
diff --git a/iocore/eventsystem/I_EventProcessor.h b/iocore/eventsystem/I_EventProcessor.h
index 761f9ca..227ea19 100644
--- a/iocore/eventsystem/I_EventProcessor.h
+++ b/iocore/eventsystem/I_EventProcessor.h
@@ -325,6 +325,8 @@ public:
*/
int n_ethreads = 0;
+ bool has_tg_started(int etype);
+
/*------------------------------------------------------*\
| Unix & non NT Interface |
\*------------------------------------------------------*/
@@ -410,3 +412,5 @@ private:
};
extern inkcoreapi class EventProcessor eventProcessor;
+
+void thread_started(EThread *);
diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index 2182c34..2661b67 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -371,6 +371,7 @@ EventProcessor::spawn_event_threads(EventType ev_type, int n_threads, size_t sta
}
tg->_count = n_threads;
n_ethreads += n_threads;
+ schedule_spawn(&thread_started, ev_type);
// Separate loop to avoid race conditions between spawn events and updating the thread table for
// the group. Some thread set up depends on knowing the total number of threads but that can't be
@@ -394,10 +395,7 @@ EventProcessor::initThreadState(EThread *t)
{
// Run all thread type initialization continuations that match the event types for this thread.
for (int i = 0; i < MAX_EVENT_TYPES; ++i) {
- if (t->is_event_type(i)) { // that event type done here, roll thread start events of that type.
- if (++thread_group[i]._started == thread_group[i]._count && thread_group[i]._afterStartCallback != nullptr) {
- thread_group[i]._afterStartCallback();
- }
+ if (t->is_event_type(i)) {
// To avoid race conditions on the event in the spawn queue, create a local one to actually send.
// Use the spawn queue event as a read only model.
Event *nev = eventAllocator.alloc();
@@ -491,3 +489,24 @@ EventProcessor::spawn_thread(Continuation *cont, const char *thr_name, size_t st
return e;
}
+
+bool
+EventProcessor::has_tg_started(int etype)
+{
+ return thread_group[etype]._started == thread_group[etype]._count;
+}
+
+void
+thread_started(EThread *t)
+{
+ // Find what type of thread this is, and increment the "_started" counter of that thread type.
+ for (int i = 0; i < MAX_EVENT_TYPES; ++i) {
+ if (t->is_event_type(i)) {
+ if (++eventProcessor.thread_group[i]._started == eventProcessor.thread_group[i]._count &&
+ eventProcessor.thread_group[i]._afterStartCallback != nullptr) {
+ eventProcessor.thread_group[i]._afterStartCallback();
+ }
+ break;
+ }
+ }
+}
diff --git a/proxy/http/HttpProxyServerMain.cc b/proxy/http/HttpProxyServerMain.cc
index 48ea161..9c80490 100644
--- a/proxy/http/HttpProxyServerMain.cc
+++ b/proxy/http/HttpProxyServerMain.cc
@@ -308,9 +308,9 @@ init_accept_HttpProxyServer(int n_accept_threads)
* start_HttpProxyServer().
*/
void
-init_HttpProxyServer(EThread *)
+init_HttpProxyServer()
{
- if (eventProcessor.thread_group[ET_NET]._started == num_of_net_threads) {
+ if (eventProcessor.has_tg_started(ET_NET)) {
std::unique_lock<std::mutex> lock(proxyServerMutex);
et_net_threads_ready = true;
lock.unlock();
diff --git a/proxy/http/HttpProxyServerMain.h b/proxy/http/HttpProxyServerMain.h
index 96c461a..1de9ced 100644
--- a/proxy/http/HttpProxyServerMain.h
+++ b/proxy/http/HttpProxyServerMain.h
@@ -38,7 +38,7 @@ void init_accept_HttpProxyServer(int n_accept_threads = 0);
/** Checks whether we can call start_HttpProxyServer().
*/
-void init_HttpProxyServer(EThread *);
+void init_HttpProxyServer();
/** Start the proxy server.
The port data should have been created by @c prep_HttpProxyServer().
diff --git a/src/traffic_server/traffic_server.cc b/src/traffic_server/traffic_server.cc
index 07d46ad..44e3d24 100644
--- a/src/traffic_server/traffic_server.cc
+++ b/src/traffic_server/traffic_server.cc
@@ -1813,10 +1813,10 @@ main(int /* argc ATS_UNUSED */, const char **argv)
quic_NetProcessor.init();
#endif
- // If num_accept_threads == 0, let the ET_NET threads to set the condition variable,
+ // If num_accept_threads == 0, let the ET_NET threads set the condition variable,
// Else we set it here so when checking the condition variable later it returns immediately.
- if (num_accept_threads == 0) {
- eventProcessor.schedule_spawn(&init_HttpProxyServer, ET_NET);
+ if (num_accept_threads == 0 || command_flag) {
+ eventProcessor.thread_group[ET_NET]._afterStartCallback = init_HttpProxyServer;
} else {
std::unique_lock<std::mutex> lock(proxyServerMutex);
et_net_threads_ready = true;
@@ -1858,6 +1858,12 @@ main(int /* argc ATS_UNUSED */, const char **argv)
int cmd_ret = cmd_mode();
if (cmd_ret != CMD_IN_PROGRESS) {
+ // Check the condition variable.
+ {
+ std::unique_lock<std::mutex> lock(proxyServerMutex);
+ proxyServerCheck.wait(lock, [] { return et_net_threads_ready; });
+ }
+
if (cmd_ret >= 0) {
::exit(0); // everything is OK
} else {