Posted to commits@trafficserver.apache.org by du...@apache.org on 2019/01/22 15:53:43 UTC

[trafficserver] branch master updated: new scheduling and thread affinity apis with tests

This is an automated email from the ASF dual-hosted git repository.

duke8253 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
     new 45be602  new scheduling and thread affinity apis with tests
45be602 is described below

commit 45be6020b06874510ee5e02ec9e9c4b52ac5e853
Author: Fei Deng <fe...@oath.com>
AuthorDate: Thu Oct 4 13:54:55 2018 -0700

    new scheduling and thread affinity apis with tests
---
 .../api/functions/TSContSchedule.en.rst            |  29 +-
 ...Schedule.en.rst => TSContScheduleOnPool.en.rst} |  30 +-
 .../TSContScheduleOnThread.en.rst}                 |  38 +--
 .../TSContThreadAffinityClear.en.rst}              |  32 +-
 .../TSContThreadAffinityGet.en.rst}                |  32 +-
 .../TSContThreadAffinitySet.en.rst}                |  40 +--
 doc/developer-guide/api/types/TSThreadPool.en.rst  |   2 -
 doc/developer-guide/plugins/actions/index.en.rst   |   6 +-
 .../continuations/writing-handler-functions.en.rst |   6 +
 example/blacklist_1/blacklist_1.c                  |   4 +-
 example/thread_pool/psi.c                          |   6 +-
 include/ts/apidefs.h.in                            |   2 +-
 include/ts/ts.h                                    |  12 +-
 include/tscpp/api/AsyncTimer.h                     |   2 +-
 include/tscpp/api/Continuation.h                   |   8 +-
 iocore/eventsystem/I_Continuation.h                |  25 ++
 iocore/eventsystem/I_EThread.h                     |   2 +
 iocore/eventsystem/P_UnixEThread.h                 |  11 +
 iocore/eventsystem/P_UnixEventProcessor.h          |   9 +-
 iocore/eventsystem/UnixEThread.cc                  |   1 +
 plugins/background_fetch/background_fetch.cc       |   2 +-
 plugins/esi/esi.cc                                 |   2 +-
 plugins/experimental/certifier/certifier.cc        |   2 +-
 .../collapsed_forwarding/collapsed_forwarding.cc   |   2 +-
 plugins/experimental/header_freq/header_freq.cc    |   2 +-
 plugins/experimental/inliner/ts.cc                 |   2 +-
 plugins/experimental/prefetch/fetch.cc             |   2 +-
 .../stale_while_revalidate.c                       |   4 +-
 plugins/experimental/system_stats/system_stats.c   |   4 +-
 plugins/generator/generator.cc                     |   2 +-
 plugins/lua/ts_lua_http_intercept.c                |   2 +-
 plugins/lua/ts_lua_misc.c                          |   4 +-
 plugins/regex_revalidate/regex_revalidate.c        |   6 +-
 src/traffic_server/InkAPI.cc                       | 173 ++++++++++-
 src/traffic_server/InkAPITest.cc                   |  76 ++---
 src/traffic_server/InkIOCoreAPI.cc                 |   6 +
 src/tscpp/api/AsyncTimer.cc                        |   6 +-
 src/tscpp/api/InterceptPlugin.cc                   |   2 +-
 tests/gold_tests/cont_schedule/gold/http_200.gold  |   9 +
 tests/gold_tests/cont_schedule/gold/schedule.gold  |   4 +
 .../cont_schedule/gold/schedule_on_pool.gold       |   9 +
 .../cont_schedule/gold/schedule_on_thread.gold     |   4 +
 .../cont_schedule/gold/thread_affinity.gold        |   5 +
 tests/gold_tests/cont_schedule/schedule.test.py    |  72 +++++
 .../cont_schedule/schedule_on_pool.test.py         |  72 +++++
 .../cont_schedule/schedule_on_thread.test.py       |  72 +++++
 .../cont_schedule/thread_affinity.test.py          |  72 +++++
 tests/tools/plugins/cont_schedule.cc               | 327 +++++++++++++++++++++
 tests/tools/plugins/continuations_verify.cc        |   2 +-
 tests/tools/plugins/ssl_hook_test.cc               |   6 +-
 tests/tools/plugins/ssntxnorder_verify.cc          |   2 +-
 51 files changed, 1017 insertions(+), 235 deletions(-)

diff --git a/doc/developer-guide/api/functions/TSContSchedule.en.rst b/doc/developer-guide/api/functions/TSContSchedule.en.rst
index 944a481..dea45b0 100644
--- a/doc/developer-guide/api/functions/TSContSchedule.en.rst
+++ b/doc/developer-guide/api/functions/TSContSchedule.en.rst
@@ -26,7 +26,7 @@ Synopsis
 
 `#include <ts/ts.h>`
 
-.. function:: TSAction TSContSchedule(TSCont contp, ink_hrtime delay, TSThreadPool tp)
+.. function:: TSAction TSContSchedule(TSCont contp, TSHRTime timeout)
 
 Description
 ===========
@@ -38,27 +38,12 @@ not be effective. :arg:`contp` is required to have a mutex, which is provided to
 
 The return value can be used to cancel the scheduled event via :func:`TSActionCancel`. This is
 effective until the continuation :arg:`contp` is being dispatched. However, if it is scheduled on
-another thread this can problematic to be correctly timed. The return value can be checked with
+another thread, this can be problematic to time correctly. The return value can be checked with
 :func:`TSActionDone` to see if the continuation ran before the return, which is possible if
-:arg:`delay` is `0`.
+:arg:`timeout` is `0`. Returns ``nullptr`` if the continuation has no thread affinity set.
 
-The continuation is scheduled for a particular thread selected from a group of similar threads, as indicated by :arg:`tp`.
-
-=========================== =======================================================================================
-Pool                        Properties
-=========================== =======================================================================================
-``TS_THREAD_POOL_DEFAULT``  Use the default pool. Continuations using this must not block.
-``TS_THREAD_POOL_NET``      Transaction processing threads. Continuations on these threads must not block.
-``TS_THREAD_POOL_TASK``     Background threads. Continuations can perform blocking operations.
-``TS_THREAD_POOL_SSL``      *DEPRECATED* - these are no longer used as of ATS 6.
-``TS_THREAD_POOL_DNS``      DNS request processing. May not exist depending on configuration. Not recommended.
-``TS_THREAD_POOL_REMAP``    *DEPRECATED* - these are not longer used.
-``TS_THREAD_POOL_CLUSTER``  *DEPRECATED* - these are no longer used as of ATS 7.
-``TS_THREAD_POOL_UDP``      *DEPRECATED*
-=========================== =======================================================================================
+See Also
+========
 
-In practice, any choice except ``TS_THREAD_POOL_NET`` or ``TS_THREAD_POOL_TASK`` is strong not
-recommended. The ``TS_THREAD_POOL_NET`` threads are the same threads on which callback hooks are
-called and continuations that use them have the same restrictions. ``TS_THREAD_POOL_TASK`` threads
-are threads that exist to perform long or blocking actions, although sufficiently long operation can
-impact system performance by blocking other continuations on the threads.
+:doc:`TSContScheduleOnPool.en`
+:doc:`TSContScheduleOnThread.en`
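+
+Example
+=======
+
+A minimal sketch, assuming it runs on an event thread; the handler name and
+the 100 millisecond delay are illustrative only:
+
+.. code-block:: c
+
+    #include <ts/ts.h>
+
+    static int
+    handler(TSCont contp, TSEvent event, void *edata)
+    {
+      /* TS_EVENT_IMMEDIATE for a timeout of 0, TS_EVENT_TIMEOUT otherwise. */
+      return 0;
+    }
+
+    static void
+    schedule_example(void)
+    {
+      TSCont contp = TSContCreate(handler, TSMutexCreate());
+
+      /* TSContSchedule() uses the continuation's thread affinity, so set it
+         first; with no affinity the call returns nullptr. */
+      TSContThreadAffinitySet(contp, TSEventThreadSelf());
+
+      TSAction action = TSContSchedule(contp, 100);
+      if (action && !TSActionDone(action)) {
+        /* Still pending; may be cancelled with TSActionCancel(action). */
+      }
+    }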
diff --git a/doc/developer-guide/api/functions/TSContSchedule.en.rst b/doc/developer-guide/api/functions/TSContScheduleOnPool.en.rst
similarity index 70%
copy from doc/developer-guide/api/functions/TSContSchedule.en.rst
copy to doc/developer-guide/api/functions/TSContScheduleOnPool.en.rst
index 944a481..462f6a9 100644
--- a/doc/developer-guide/api/functions/TSContSchedule.en.rst
+++ b/doc/developer-guide/api/functions/TSContScheduleOnPool.en.rst
@@ -18,47 +18,45 @@
 
 .. default-domain:: c
 
-TSContSchedule
-**************
+TSContScheduleOnPool
+********************
 
 Synopsis
 ========
 
 `#include <ts/ts.h>`
 
-.. function:: TSAction TSContSchedule(TSCont contp, ink_hrtime delay, TSThreadPool tp)
+.. function:: TSAction TSContScheduleOnPool(TSCont contp, TSHRTime timeout, TSThreadPool tp)
 
 Description
 ===========
 
-Schedules :arg:`contp` to run :arg:`delay` milliseconds in the future. This is approximate. The delay
-will be at least :arg:`delay` but possibly more. Resolutions finer than roughly 5 milliseconds will
-not be effective. :arg:`contp` is required to have a mutex, which is provided to
-:func:`TSContCreate`.
-
-The return value can be used to cancel the scheduled event via :func:`TSActionCancel`. This is
-effective until the continuation :arg:`contp` is being dispatched. However, if it is scheduled on
-another thread this can problematic to be correctly timed. The return value can be checked with
-:func:`TSActionDone` to see if the continuation ran before the return, which is possible if
-:arg:`delay` is `0`.
+Mostly the same as :func:`TSContSchedule`, except that :arg:`contp` is scheduled on a random
+thread that belongs to :arg:`tp`. If the thread set by thread affinity has the same type as
+:arg:`tp`, :arg:`contp` will be scheduled on that thread instead of a random one.
 
 The continuation is scheduled for a particular thread selected from a group of similar threads, as indicated by :arg:`tp`.
 
 =========================== =======================================================================================
 Pool                        Properties
 =========================== =======================================================================================
-``TS_THREAD_POOL_DEFAULT``  Use the default pool. Continuations using this must not block.
 ``TS_THREAD_POOL_NET``      Transaction processing threads. Continuations on these threads must not block.
 ``TS_THREAD_POOL_TASK``     Background threads. Continuations can perform blocking operations.
 ``TS_THREAD_POOL_SSL``      *DEPRECATED* - these are no longer used as of ATS 6.
 ``TS_THREAD_POOL_DNS``      DNS request processing. May not exist depending on configuration. Not recommended.
-``TS_THREAD_POOL_REMAP``    *DEPRECATED* - these are not longer used.
+``TS_THREAD_POOL_REMAP``    *DEPRECATED* - these are no longer used.
 ``TS_THREAD_POOL_CLUSTER``  *DEPRECATED* - these are no longer used as of ATS 7.
 ``TS_THREAD_POOL_UDP``      *DEPRECATED*
 =========================== =======================================================================================
 
-In practice, any choice except ``TS_THREAD_POOL_NET`` or ``TS_THREAD_POOL_TASK`` is strong not
+In practice, any choice except ``TS_THREAD_POOL_NET`` or ``TS_THREAD_POOL_TASK`` is strongly not
 recommended. The ``TS_THREAD_POOL_NET`` threads are the same threads on which callback hooks are
 called and continuations that use them have the same restrictions. ``TS_THREAD_POOL_TASK`` threads
 are threads that exist to perform long or blocking actions, although sufficiently long operation can
 impact system performance by blocking other continuations on the threads.
+
+See Also
+========
+
+:doc:`TSContSchedule.en`
+:doc:`TSContScheduleOnThread.en`
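+
+Example
+=======
+
+A minimal sketch scheduling blocking work onto a ``TS_THREAD_POOL_TASK``
+thread; the handler name and the 50 millisecond delay are illustrative only:
+
+.. code-block:: c
+
+    #include <ts/ts.h>
+
+    static int
+    blocking_work(TSCont contp, TSEvent event, void *edata)
+    {
+      /* Blocking operations are acceptable here: this runs on a TASK thread. */
+      return 0;
+    }
+
+    static void
+    schedule_on_pool_example(void)
+    {
+      TSCont contp = TSContCreate(blocking_work, TSMutexCreate());
+
+      /* Runs on some TASK thread in roughly 50ms. If contp already has
+         affinity to a TASK thread, that thread is used instead of a
+         random one. */
+      TSContScheduleOnPool(contp, 50, TS_THREAD_POOL_TASK);
+    }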
diff --git a/doc/developer-guide/api/types/TSThreadPool.en.rst b/doc/developer-guide/api/functions/TSContScheduleOnThread.en.rst
similarity index 62%
copy from doc/developer-guide/api/types/TSThreadPool.en.rst
copy to doc/developer-guide/api/functions/TSContScheduleOnThread.en.rst
index e7140a9..fed6313 100644
--- a/doc/developer-guide/api/types/TSThreadPool.en.rst
+++ b/doc/developer-guide/api/functions/TSContScheduleOnThread.en.rst
@@ -16,37 +16,25 @@
 
 .. include:: ../../../common.defs
 
-TSThreadPool
-************
+.. default-domain:: c
+
+TSContScheduleOnThread
+**********************
 
 Synopsis
 ========
 
-`#include <ts/apidefs.h>`
-
-.. c:type:: TSThreadPool
-
-Enum typedef.
-
-Enumeration Members
-===================
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DEFAULT
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_NET
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_TASK
+`#include <ts/ts.h>`
 
-.. c:member:: TSThreadPool TS_THREAD_POOL_SSL
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DNS
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_REMAP
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_CLUSTER
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_UDP
+.. function:: TSAction TSContScheduleOnThread(TSCont contp, TSHRTime timeout, TSEventThread ethread)
 
 Description
 ===========
 
+Mostly the same as :func:`TSContSchedule`, except that :arg:`contp` is scheduled directly on :arg:`ethread`.
+
+See Also
+========
+
+:doc:`TSContSchedule.en`
+:doc:`TSContScheduleOnPool.en`
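+
+Example
+=======
+
+A minimal sketch pinning follow-up work to the current event thread; the
+handler name is illustrative only:
+
+.. code-block:: c
+
+    #include <ts/ts.h>
+
+    static int
+    handler(TSCont contp, TSEvent event, void *edata)
+    {
+      /* Runs on the same thread that scheduled it. */
+      return 0;
+    }
+
+    static void
+    schedule_on_thread_example(void)
+    {
+      /* Must be called from an event thread for TSEventThreadSelf() to
+         return a usable thread. */
+      TSCont contp = TSContCreate(handler, TSMutexCreate());
+      TSContScheduleOnThread(contp, 0, TSEventThreadSelf());
+    }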
diff --git a/doc/developer-guide/api/types/TSThreadPool.en.rst b/doc/developer-guide/api/functions/TSContThreadAffinityClear.en.rst
similarity index 62%
copy from doc/developer-guide/api/types/TSThreadPool.en.rst
copy to doc/developer-guide/api/functions/TSContThreadAffinityClear.en.rst
index e7140a9..9846535 100644
--- a/doc/developer-guide/api/types/TSThreadPool.en.rst
+++ b/doc/developer-guide/api/functions/TSContThreadAffinityClear.en.rst
@@ -16,37 +16,19 @@
 
 .. include:: ../../../common.defs
 
-TSThreadPool
-************
+.. default-domain:: c
+
+TSContThreadAffinityClear
+*************************
 
 Synopsis
 ========
 
-`#include <ts/apidefs.h>`
-
-.. c:type:: TSThreadPool
-
-Enum typedef.
-
-Enumeration Members
-===================
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DEFAULT
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_NET
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_TASK
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_SSL
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DNS
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_REMAP
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_CLUSTER
+`#include <ts/ts.h>`
 
-.. c:member:: TSThreadPool TS_THREAD_POOL_UDP
+.. function:: void TSContThreadAffinityClear(TSCont contp)
 
 Description
 ===========
 
+Clear the thread affinity of continuation :arg:`contp`.
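+
+A minimal sketch (assumes :arg:`contp` was created earlier with :func:`TSContCreate`):
+
+.. code-block:: c
+
+    /* Drop the remembered thread so a later TSContScheduleOnPool() picks an
+       arbitrary thread of the pool. Note that TSContSchedule() requires an
+       affinity and returns nullptr after this call until one is set again. */
+    TSContThreadAffinityClear(contp);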
diff --git a/doc/developer-guide/api/types/TSThreadPool.en.rst b/doc/developer-guide/api/functions/TSContThreadAffinityGet.en.rst
similarity index 62%
copy from doc/developer-guide/api/types/TSThreadPool.en.rst
copy to doc/developer-guide/api/functions/TSContThreadAffinityGet.en.rst
index e7140a9..c31363c 100644
--- a/doc/developer-guide/api/types/TSThreadPool.en.rst
+++ b/doc/developer-guide/api/functions/TSContThreadAffinityGet.en.rst
@@ -16,37 +16,19 @@
 
 .. include:: ../../../common.defs
 
-TSThreadPool
-************
+.. default-domain:: c
+
+TSContThreadAffinityGet
+***********************
 
 Synopsis
 ========
 
-`#include <ts/apidefs.h>`
-
-.. c:type:: TSThreadPool
-
-Enum typedef.
-
-Enumeration Members
-===================
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DEFAULT
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_NET
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_TASK
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_SSL
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DNS
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_REMAP
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_CLUSTER
+`#include <ts/ts.h>`
 
-.. c:member:: TSThreadPool TS_THREAD_POOL_UDP
+.. function:: TSEventThread TSContThreadAffinityGet(TSCont contp)
 
 Description
 ===========
 
+Get the thread affinity of continuation :arg:`contp`.
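+
+A minimal sketch (assumes :arg:`contp` was created earlier with :func:`TSContCreate`):
+
+.. code-block:: c
+
+    TSEventThread ethread = TSContThreadAffinityGet(contp);
+    if (ethread == NULL) {
+      /* No affinity; TSContSchedule() on this continuation would return nullptr. */
+    }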
diff --git a/doc/developer-guide/api/types/TSThreadPool.en.rst b/doc/developer-guide/api/functions/TSContThreadAffinitySet.en.rst
similarity index 59%
copy from doc/developer-guide/api/types/TSThreadPool.en.rst
copy to doc/developer-guide/api/functions/TSContThreadAffinitySet.en.rst
index e7140a9..cae2903 100644
--- a/doc/developer-guide/api/types/TSThreadPool.en.rst
+++ b/doc/developer-guide/api/functions/TSContThreadAffinitySet.en.rst
@@ -16,37 +16,27 @@
 
 .. include:: ../../../common.defs
 
-TSThreadPool
-************
+.. default-domain:: c
+
+TSContThreadAffinitySet
+***********************
 
 Synopsis
 ========
 
-`#include <ts/apidefs.h>`
-
-.. c:type:: TSThreadPool
-
-Enum typedef.
-
-Enumeration Members
-===================
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DEFAULT
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_NET
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_TASK
+`#include <ts/ts.h>`
 
-.. c:member:: TSThreadPool TS_THREAD_POOL_SSL
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_DNS
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_REMAP
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_CLUSTER
-
-.. c:member:: TSThreadPool TS_THREAD_POOL_UDP
+.. function:: TSReturnCode TSContThreadAffinitySet(TSCont contp, TSEventThread ethread)
 
 Description
 ===========
 
+Set the thread affinity of continuation :arg:`contp` to :arg:`ethread`. Future calls to
+:func:`TSContSchedule`, and to :func:`TSContScheduleOnPool` with a pool of the same type as
+:arg:`ethread`, will schedule the continuation on :arg:`ethread` rather than on an arbitrary
+thread of that type.
+
+Return Values
+=============
+
+:data:`TS_SUCCESS` if thread affinity of continuation :arg:`contp` was set to :arg:`ethread`,
+:data:`TS_ERROR` if not.
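+
+Example
+=======
+
+A minimal sketch pinning a continuation to the current event thread; the
+handler name is illustrative only:
+
+.. code-block:: c
+
+    #include <ts/ts.h>
+
+    static int
+    handler(TSCont contp, TSEvent event, void *edata)
+    {
+      return 0;
+    }
+
+    static void
+    affinity_example(void)
+    {
+      TSCont contp = TSContCreate(handler, TSMutexCreate());
+
+      /* Subsequent TSContSchedule() calls, and TSContScheduleOnPool() calls
+         with a matching pool type, will use this thread. */
+      if (TSContThreadAffinitySet(contp, TSEventThreadSelf()) != TS_SUCCESS) {
+        TSError("failed to set thread affinity");
+      }
+    }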
diff --git a/doc/developer-guide/api/types/TSThreadPool.en.rst b/doc/developer-guide/api/types/TSThreadPool.en.rst
index e7140a9..298a850 100644
--- a/doc/developer-guide/api/types/TSThreadPool.en.rst
+++ b/doc/developer-guide/api/types/TSThreadPool.en.rst
@@ -31,8 +31,6 @@ Enum typedef.
 Enumeration Members
 ===================
 
-.. c:member:: TSThreadPool TS_THREAD_POOL_DEFAULT
-
 .. c:member:: TSThreadPool TS_THREAD_POOL_NET
 
 .. c:member:: TSThreadPool TS_THREAD_POOL_TASK
diff --git a/doc/developer-guide/plugins/actions/index.en.rst b/doc/developer-guide/plugins/actions/index.en.rst
index afe6ee1..7ea2561 100644
--- a/doc/developer-guide/plugins/actions/index.en.rst
+++ b/doc/developer-guide/plugins/actions/index.en.rst
@@ -97,7 +97,7 @@ Below is an example of typical usage for an action:
                system is initialized. We'll simply schedule an event
                on the continuation to occur as soon as the rest of
                the system is started up. */
-            TSContSchedule (contp, 0, TS_THREAD_POOL_DEFAULT);
+            TSContScheduleOnPool (contp, 0, TS_THREAD_POOL_NET);
         }
 
 The example above shows a simple plugin that creates a continuation and
@@ -127,7 +127,7 @@ cancel the action. The following sample code implements this:
         {
             switch (event) {
                 case (TS_EVENT_IMMEDIATE):
-                    TSContSchedule (contp, 30000, TS_THREAD_POOL_DEFAULT);
+                    TSContScheduleOnPool (contp, 30000, TS_THREAD_POOL_NET);
                     TSAction actionp = TSNetConnect(contp, 127.0.0.1, 9999);
                     if (!TSActionDone (actionp)) {
                         TSContDataSet (contp, actionp);
@@ -168,7 +168,7 @@ cancel the action. The following sample code implements this:
                system is initialized. We'll simply schedule an event
                on the continuation to occur as soon as the rest of
                the system is started up. */
-            TSContSchedule (contp, 0, TS_THREAD_POOL_DEFAULT);
+            TSContScheduleOnPool (contp, 0, TS_THREAD_POOL_NET);
         }
 
 The action functions are:
diff --git a/doc/developer-guide/plugins/continuations/writing-handler-functions.en.rst b/doc/developer-guide/plugins/continuations/writing-handler-functions.en.rst
index b4cde81..7250f25 100644
--- a/doc/developer-guide/plugins/continuations/writing-handler-functions.en.rst
+++ b/doc/developer-guide/plugins/continuations/writing-handler-functions.en.rst
@@ -96,6 +96,8 @@ Event                                        Event Sender
 :data:`TS_EVENT_IMMEDIATE`                   :func:`TSVConnClose`
                                              :func:`TSVIOReenable`
                                              :func:`TSContSchedule`
+                                             :func:`TSContScheduleOnPool`
+                                             :func:`TSContScheduleOnThread`
 :data:`TS_EVENT_IMMEDIATE`                   :data:`TS_HTTP_REQUEST_TRANSFORM_HOOK`
 :data:`TS_EVENT_IMMEDIATE`                   :data:`TS_HTTP_RESPONSE_TRANSFORM_HOOK`
 :data:`TS_EVENT_CACHE_OPEN_READ`             :func:`TSCacheRead`                         Cache VC
@@ -112,6 +114,8 @@ Event                                        Event Sender
                                              :func:`TSHttpTxnIntercept`
 :data:`TS_EVENT_HOST_LOOKUP`                 :func:`TSHostLookup`                        :type:`TSHostLookupResult`
 :data:`TS_EVENT_TIMEOUT`                     :func:`TSContSchedule`
+                                             :func:`TSContScheduleOnPool`
+                                             :func:`TSContScheduleOnThread`
 :data:`TS_EVENT_ERROR`
 :data:`TS_EVENT_VCONN_READ_READY`            :func:`TSVConnRead`                         :type:`TSVIO`
 :data:`TS_EVENT_VCONN_WRITE_READY`           :func:`TSVConnWrite`                        :type:`TSVIO`
@@ -134,3 +138,5 @@ The continuation functions are listed below:
 -  :func:`TSContDestroy`
 -  :func:`TSContMutexGet`
 -  :func:`TSContSchedule`
+-  :func:`TSContScheduleOnPool`
+-  :func:`TSContScheduleOnThread`
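+
+For example, a handler scheduled through any of the :func:`TSContSchedule`
+family should be prepared for both event codes (a minimal, illustrative
+sketch):
+
+.. code-block:: c
+
+    static int
+    handler(TSCont contp, TSEvent event, void *edata)
+    {
+      switch (event) {
+      case TS_EVENT_IMMEDIATE: /* scheduled with a timeout of 0 */
+      case TS_EVENT_TIMEOUT:   /* scheduled with a non-zero timeout */
+        /* perform the deferred work */
+        break;
+      default:
+        break;
+      }
+      return 0;
+    }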
diff --git a/example/blacklist_1/blacklist_1.c b/example/blacklist_1/blacklist_1.c
index 34703a2..7b08186 100644
--- a/example/blacklist_1/blacklist_1.c
+++ b/example/blacklist_1/blacklist_1.c
@@ -100,7 +100,7 @@ handle_dns(TSHttpTxn txnp, TSCont contp)
     TSDebug(PLUGIN_NAME, "Unable to get lock. Will retry after some time");
     TSHandleMLocRelease(bufp, hdr_loc, url_loc);
     TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
-    TSContSchedule(contp, RETRY_TIME, TS_THREAD_POOL_DEFAULT);
+    TSContScheduleOnPool(contp, RETRY_TIME, TS_THREAD_POOL_NET);
     return;
   }
 
@@ -188,7 +188,7 @@ read_blacklist(TSCont contp)
     if (file != NULL) {
       TSfclose(file);
     }
-    TSContSchedule(contp, RETRY_TIME, TS_THREAD_POOL_DEFAULT);
+    TSContScheduleOnPool(contp, RETRY_TIME, TS_THREAD_POOL_NET);
     return;
   }
 
diff --git a/example/thread_pool/psi.c b/example/thread_pool/psi.c
index fa465b2..29db012 100644
--- a/example/thread_pool/psi.c
+++ b/example/thread_pool/psi.c
@@ -511,7 +511,7 @@ psi_include(TSCont contp, void *edata ATS_UNUSED)
      TS_HTTP_READ_REQUEST_HDR, TS_HTTP_OS_DNS and so on...) we could
      use TSHttpTxnReenable to wake up the transaction instead of sending an event. */
 
-  TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
   data->psi_success = 0;
   data->state       = STATE_READ_DATA;
   TSMutexUnlock(TSContMutexGet(contp));
@@ -749,7 +749,7 @@ transform_handler(TSCont contp, TSEvent event, void *edata ATS_UNUSED)
     d->contp = contp;
     d->event = event;
     TSContDataSet(c, d);
-    TSContSchedule(c, 10, TS_THREAD_POOL_DEFAULT);
+    TSContScheduleOnPool(c, 10, TS_THREAD_POOL_NET);
     return 1;
   }
 
@@ -765,7 +765,7 @@ transform_handler(TSCont contp, TSEvent event, void *edata ATS_UNUSED)
        the continuation right away as the thread will call us back
        on this continuation. */
     if (state == STATE_READ_PSI) {
-      TSContSchedule(contp, 10, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 10, TS_THREAD_POOL_NET);
     } else {
       TSMutexUnlock(TSContMutexGet(contp));
       cont_data_destroy(TSContDataGet(contp));
diff --git a/include/ts/apidefs.h.in b/include/ts/apidefs.h.in
index 5e7bf7c..7adf619 100644
--- a/include/ts/apidefs.h.in
+++ b/include/ts/apidefs.h.in
@@ -823,7 +823,6 @@ typedef enum {
 /* The TASK pool of threads is the primary method of off-loading continuations from the
    net-threads. Configure this with proxy.config.task_threads in records.config. */
 typedef enum {
-  TS_THREAD_POOL_DEFAULT = -1,
   TS_THREAD_POOL_NET,
   TS_THREAD_POOL_TASK,
   /* unlikely you should use these */
@@ -901,6 +900,7 @@ typedef struct tsapi_cachetxn *TSCacheTxn;
 typedef struct tsapi_port *TSPortDescriptor;
 typedef struct tsapi_vio *TSVIO;
 typedef struct tsapi_thread *TSThread;
+typedef struct tsapi_event_thread *TSEventThread;
 typedef struct tsapi_x509 *TSSslX509;
 typedef struct tsapi_mutex *TSMutex;
 typedef struct tsapi_config *TSConfig;
diff --git a/include/ts/ts.h b/include/ts/ts.h
index a3ba7b3..d5ee8b0 100644
--- a/include/ts/ts.h
+++ b/include/ts/ts.h
@@ -1108,6 +1108,7 @@ tsapi TSThread TSThreadInit(void);
 tsapi void TSThreadDestroy(TSThread thread);
 tsapi void TSThreadWait(TSThread thread);
 tsapi TSThread TSThreadSelf(void);
+tsapi TSEventThread TSEventThreadSelf(void);
 
 /* --------------------------------------------------------------------------
    Mutexes */
@@ -1188,8 +1189,15 @@ tsapi TSCont TSContCreate(TSEventFunc funcp, TSMutex mutexp);
 tsapi void TSContDestroy(TSCont contp);
 tsapi void TSContDataSet(TSCont contp, void *data);
 tsapi void *TSContDataGet(TSCont contp);
-tsapi TSAction TSContSchedule(TSCont contp, TSHRTime timeout, TSThreadPool tp);
-tsapi TSAction TSContScheduleEvery(TSCont contp, TSHRTime every /* millisecs */, TSThreadPool tp);
+tsapi TSAction TSContSchedule(TSCont contp, TSHRTime timeout);
+tsapi TSAction TSContScheduleOnPool(TSCont contp, TSHRTime timeout, TSThreadPool tp);
+tsapi TSAction TSContScheduleOnThread(TSCont contp, TSHRTime timeout, TSEventThread ethread);
+tsapi TSAction TSContScheduleEvery(TSCont contp, TSHRTime every /* millisecs */);
+tsapi TSAction TSContScheduleEveryOnPool(TSCont contp, TSHRTime every /* millisecs */, TSThreadPool tp);
+tsapi TSAction TSContScheduleEveryOnThread(TSCont contp, TSHRTime every /* millisecs */, TSEventThread ethread);
+tsapi TSReturnCode TSContThreadAffinitySet(TSCont contp, TSEventThread ethread);
+tsapi TSEventThread TSContThreadAffinityGet(TSCont contp);
+tsapi void TSContThreadAffinityClear(TSCont contp);
 tsapi TSAction TSHttpSchedule(TSCont contp, TSHttpTxn txnp, TSHRTime timeout);
 tsapi int TSContCall(TSCont contp, TSEvent event, void *edata);
 tsapi TSMutex TSContMutexGet(TSCont contp);
diff --git a/include/tscpp/api/AsyncTimer.h b/include/tscpp/api/AsyncTimer.h
index 85df2b5..3e7658a 100644
--- a/include/tscpp/api/AsyncTimer.h
+++ b/include/tscpp/api/AsyncTimer.h
@@ -68,7 +68,7 @@ public:
   // For convenience, additional constructor prototypes.
 
   AsyncTimer(Type type, int period_in_ms, int initial_period_in_ms = 0)
-    : AsyncTimer(type, period_in_ms, initial_period_in_ms, TS_THREAD_POOL_DEFAULT)
+    : AsyncTimer(type, period_in_ms, initial_period_in_ms, TS_THREAD_POOL_NET)
   {
   }
 
diff --git a/include/tscpp/api/Continuation.h b/include/tscpp/api/Continuation.h
index edb6345..62e40a1 100644
--- a/include/tscpp/api/Continuation.h
+++ b/include/tscpp/api/Continuation.h
@@ -111,9 +111,9 @@ public:
   // Timeout of zero means no timeout.
   //
   Action
-  schedule(TSHRTime timeout = 0, TSThreadPool tp = TS_THREAD_POOL_DEFAULT)
+  schedule(TSHRTime timeout = 0, TSThreadPool tp = TS_THREAD_POOL_NET)
   {
-    return TSContSchedule(_cont, timeout, tp);
+    return TSContScheduleOnPool(_cont, timeout, tp);
   }
 
   // Timeout of zero means no timeout.
@@ -125,9 +125,9 @@ public:
   }
 
   Action
-  scheduleEvery(TSHRTime interval /* milliseconds */, TSThreadPool tp = TS_THREAD_POOL_DEFAULT)
+  scheduleEvery(TSHRTime interval /* milliseconds */, TSThreadPool tp = TS_THREAD_POOL_NET)
   {
-    return TSContScheduleEvery(_cont, interval, tp);
+    return TSContScheduleEveryOnPool(_cont, interval, tp);
   }
 
 protected:
diff --git a/iocore/eventsystem/I_Continuation.h b/iocore/eventsystem/I_Continuation.h
index 140a11d..abb1d7d 100644
--- a/iocore/eventsystem/I_Continuation.h
+++ b/iocore/eventsystem/I_Continuation.h
@@ -48,6 +48,7 @@ class EThread;
 class Event;
 
 extern EThread *this_ethread();
+extern EThread *this_event_thread();
 
 //////////////////////////////////////////////////////////////////////////////
 //
@@ -144,6 +145,30 @@ public:
   */
   ContFlags control_flags;
 
+  EThread *thread_affinity = this_event_thread();
+
+  bool
+  setThreadAffinity(EThread *ethread)
+  {
+    if (ethread != nullptr) {
+      thread_affinity = ethread;
+      return true;
+    }
+    return false;
+  }
+
+  EThread *
+  getThreadAffinity()
+  {
+    return thread_affinity;
+  }
+
+  void
+  clearThreadAffinity()
+  {
+    thread_affinity = nullptr;
+  }
+
   /**
     Receives the event code and data for an Event.
 
diff --git a/iocore/eventsystem/I_EThread.h b/iocore/eventsystem/I_EThread.h
index 8ce926a..f80456e 100644
--- a/iocore/eventsystem/I_EThread.h
+++ b/iocore/eventsystem/I_EThread.h
@@ -325,6 +325,8 @@ public:
   bool is_event_type(EventType et);
   void set_event_type(EventType et);
 
+  bool has_event_loop = false;
+
   // Private Interface
 
   void execute() override;
diff --git a/iocore/eventsystem/P_UnixEThread.h b/iocore/eventsystem/P_UnixEThread.h
index 9328c3b..042757b 100644
--- a/iocore/eventsystem/P_UnixEThread.h
+++ b/iocore/eventsystem/P_UnixEThread.h
@@ -179,6 +179,17 @@ this_ethread()
   return (EThread *)this_thread();
 }
 
+TS_INLINE EThread *
+this_event_thread()
+{
+  EThread *ethread = this_ethread();
+  if (ethread != nullptr && ethread->has_event_loop) {
+    return ethread;
+  } else {
+    return nullptr;
+  }
+}
+
 TS_INLINE void
 EThread::free_event(Event *e)
 {
diff --git a/iocore/eventsystem/P_UnixEventProcessor.h b/iocore/eventsystem/P_UnixEventProcessor.h
index ed97729..90f08b9 100644
--- a/iocore/eventsystem/P_UnixEventProcessor.h
+++ b/iocore/eventsystem/P_UnixEventProcessor.h
@@ -65,7 +65,14 @@ TS_INLINE Event *
 EventProcessor::schedule(Event *e, EventType etype, bool fast_signal)
 {
   ink_assert(etype < MAX_EVENT_TYPES);
-  e->ethread = assign_thread(etype);
+
+  EThread *ethread = e->continuation->getThreadAffinity();
+  if (ethread != nullptr && ethread->is_event_type(etype)) {
+    e->ethread = ethread;
+  } else {
+    e->ethread = assign_thread(etype);
+  }
+
   if (e->continuation->mutex) {
     e->mutex = e->continuation->mutex;
   } else {
diff --git a/iocore/eventsystem/UnixEThread.cc b/iocore/eventsystem/UnixEThread.cc
index 06920db..e8586fa 100644
--- a/iocore/eventsystem/UnixEThread.cc
+++ b/iocore/eventsystem/UnixEThread.cc
@@ -208,6 +208,7 @@ EThread::execute_regular()
   static EventMetrics METRIC_INIT;
 
   // give priority to immediate events
+  has_event_loop = true;
   for (;;) {
     if (unlikely(shutdown_event_system == true)) {
       return;
diff --git a/plugins/background_fetch/background_fetch.cc b/plugins/background_fetch/background_fetch.cc
index db1c875..394a3dc 100644
--- a/plugins/background_fetch/background_fetch.cc
+++ b/plugins/background_fetch/background_fetch.cc
@@ -296,7 +296,7 @@ BgFetchData::schedule()
   resp_io_buf_reader = TSIOBufferReaderAlloc(resp_io_buf);
 
   // Schedule
-  TSContSchedule(_cont, 0, TS_THREAD_POOL_NET);
+  TSContScheduleOnPool(_cont, 0, TS_THREAD_POOL_NET);
 }
 
 // Log format is:
diff --git a/plugins/esi/esi.cc b/plugins/esi/esi.cc
index 9aa8d61..db00073 100644
--- a/plugins/esi/esi.cc
+++ b/plugins/esi/esi.cc
@@ -1012,7 +1012,7 @@ transformHandler(TSCont contp, TSEvent event, void *edata)
       // lock on our continuation which will fail if we destroy
       // ourselves right now
       TSDebug(cont_debug_tag, "[%s] Deferring shutdown as data event was just processed", __FUNCTION__);
-      TSContSchedule(contp, 10, TS_THREAD_POOL_TASK);
+      TSContScheduleOnPool(contp, 10, TS_THREAD_POOL_TASK);
     } else {
       goto lShutdown;
     }
diff --git a/plugins/experimental/certifier/certifier.cc b/plugins/experimental/certifier/certifier.cc
index 9415d75..d2788f8 100644
--- a/plugins/experimental/certifier/certifier.cc
+++ b/plugins/experimental/certifier/certifier.cc
@@ -568,7 +568,7 @@ cert_retriever(TSCont contp, TSEvent event, void *edata)
     TSDebug(PLUGIN_NAME, "cert_retriever(): schedule thread to generate/retrieve cert for %s", servername);
     TSCont schedule_cont = TSContCreate(shadow_cert_generator, TSMutexCreate());
     TSContDataSet(schedule_cont, (void *)servername);
-    TSContSchedule(schedule_cont, 0, TS_THREAD_POOL_TASK);
+    TSContScheduleOnPool(schedule_cont, 0, TS_THREAD_POOL_TASK);
   } else {
     // Use existing context
     TSDebug(PLUGIN_NAME, "cert_retriever(): Reuse existing cert and context for %s", servername);
diff --git a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc
index d9165be..28561e5 100644
--- a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc
+++ b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc
@@ -210,7 +210,7 @@ on_send_response_header(RequestData *req, TSHttpTxn &txnp, TSCont &contp)
     if (delay_request) {
       req->wl_retry++;
       TSDebug(DEBUG_TAG, "delaying request, url@%p: {{%s}} on retry: %d time", txnp, req->req_url.c_str(), req->wl_retry);
-      TSContSchedule(contp, OPEN_WRITE_FAIL_REQ_DELAY_TIMEOUT, TS_THREAD_POOL_TASK);
+      TSContScheduleOnPool(contp, OPEN_WRITE_FAIL_REQ_DELAY_TIMEOUT, TS_THREAD_POOL_TASK);
       TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc);
       return TS_SUCCESS;
     }
diff --git a/plugins/experimental/header_freq/header_freq.cc b/plugins/experimental/header_freq/header_freq.cc
index 6de6620..d696a1d 100644
--- a/plugins/experimental/header_freq/header_freq.cc
+++ b/plugins/experimental/header_freq/header_freq.cc
@@ -194,7 +194,7 @@ handle_hook(TSCont contp, TSEvent event, void *edata)
         TSDebug(DEBUG_TAG_HOOK, "Scheduled execution of '%s' command", CONTROL_MSG_LOG);
         TSCont c = TSContCreate(CB_Command_Log, TSMutexCreate());
         TSContDataSet(c, new std::string(static_cast<const char *>(msgp->data), msgp->data_size));
-        TSContSchedule(c, 0, TS_THREAD_POOL_TASK);
+        TSContScheduleOnPool(c, 0, TS_THREAD_POOL_TASK);
       } else {
         TSError("[%s] Unknown command '%.*s'", PLUGIN_NAME, static_cast<int>(msgp->data_size),
                 static_cast<const char *>(msgp->data));
diff --git a/plugins/experimental/inliner/ts.cc b/plugins/experimental/inliner/ts.cc
index 3c332cc..be9684e 100644
--- a/plugins/experimental/inliner/ts.cc
+++ b/plugins/experimental/inliner/ts.cc
@@ -123,7 +123,7 @@ namespace io
     assert(vio_ != nullptr);
 
     if (timeout_ > 0) {
-      action_ = TSContSchedule(continuation_, timeout_, TS_THREAD_POOL_DEFAULT);
+      action_ = TSContScheduleOnPool(continuation_, timeout_, TS_THREAD_POOL_NET);
       assert(action_ != nullptr);
     }
   }
diff --git a/plugins/experimental/prefetch/fetch.cc b/plugins/experimental/prefetch/fetch.cc
index ca7daf0..d63e3af 100644
--- a/plugins/experimental/prefetch/fetch.cc
+++ b/plugins/experimental/prefetch/fetch.cc
@@ -596,7 +596,7 @@ BgFetch::schedule()
   /* Schedule */
   PrefetchDebug("schedule fetch: %s", _url.c_str());
   _startTime = TShrtime();
-  TSContSchedule(_cont, 0, TS_THREAD_POOL_NET);
+  TSContScheduleOnPool(_cont, 0, TS_THREAD_POOL_NET);
 }
 
 /* Log format is: name-space bytes status url */
diff --git a/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c b/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c
index c49eb40..fdcc466 100644
--- a/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c
+++ b/plugins/experimental/stale_while_revalidate/stale_while_revalidate.c
@@ -597,7 +597,7 @@ main_plugin(TSCont cont, TSEvent event, void *edata)
 
           fetch_cont = TSContCreate(fetch_resource, TSMutexCreate());
           TSContDataSet(fetch_cont, (void *)state);
-          TSContSchedule(fetch_cont, 0, TS_THREAD_POOL_NET);
+          TSContScheduleOnPool(fetch_cont, 0, TS_THREAD_POOL_NET);
           TSHttpTxnReenable(txn, TS_EVENT_HTTP_CONTINUE);
         } else if ((state->txn_start - chi->date) < (chi->max_age + chi->stale_on_error)) {
           TSDebug(PLUGIN_NAME, "Looks like we can return fresh data on 500 error");
@@ -609,7 +609,7 @@ main_plugin(TSCont cont, TSEvent event, void *edata)
           state->main_cont = cont; // we need this for the warning header callback. not sure i like it, but it works.
           fetch_cont       = TSContCreate(fetch_resource, TSMutexCreate());
           TSContDataSet(fetch_cont, (void *)state);
-          TSContSchedule(fetch_cont, 0, TS_THREAD_POOL_NET);
+          TSContScheduleOnPool(fetch_cont, 0, TS_THREAD_POOL_NET);
         } else {
           TSDebug(PLUGIN_NAME, "No love? now: %d date: %d max-age: %d swr: %d soe: %d", (int)state->txn_start, (int)chi->date,
                   (int)chi->max_age, (int)chi->stale_while_revalidate, (int)chi->stale_on_error);
diff --git a/plugins/experimental/system_stats/system_stats.c b/plugins/experimental/system_stats/system_stats.c
index d25dada..44c91b8 100644
--- a/plugins/experimental/system_stats/system_stats.c
+++ b/plugins/experimental/system_stats/system_stats.c
@@ -224,7 +224,7 @@ systemStatsContCB(TSCont cont, TSEvent event ATS_UNUSED, void *edata)
   stat_creation_mutex = TSContMutexGet(cont);
   getStats(stat_creation_mutex);
 
-  TSContSchedule(cont, SYSTEM_STATS_TIMEOUT, TS_THREAD_POOL_TASK);
+  TSContScheduleOnPool(cont, SYSTEM_STATS_TIMEOUT, TS_THREAD_POOL_TASK);
   TSDebug(DEBUG_TAG, "finished %s", __FUNCTION__);
 
   return 0;
@@ -253,7 +253,7 @@ TSPluginInit(int argc, const char *argv[])
   // We want our first hit immediate to populate the stats,
   // Subsequent schedules done within the function will be for
   // 5 seconds.
-  TSContSchedule(stats_cont, 0, TS_THREAD_POOL_TASK);
+  TSContScheduleOnPool(stats_cont, 0, TS_THREAD_POOL_TASK);
 
   TSDebug(DEBUG_TAG, "Init complete");
 }
diff --git a/plugins/generator/generator.cc b/plugins/generator/generator.cc
index 5f282cd..8eee22f 100644
--- a/plugins/generator/generator.cc
+++ b/plugins/generator/generator.cc
@@ -500,7 +500,7 @@ GeneratorInterceptionHook(TSCont contp, TSEvent event, void *edata)
 
         if (cdata.grq->delay > 0) {
           VDEBUG("delaying response by %ums", cdata.grq->delay);
-          TSContSchedule(contp, cdata.grq->delay, TS_THREAD_POOL_NET);
+          TSContScheduleOnPool(contp, cdata.grq->delay, TS_THREAD_POOL_NET);
           return TS_EVENT_NONE;
         }
 
diff --git a/plugins/lua/ts_lua_http_intercept.c b/plugins/lua/ts_lua_http_intercept.c
index 048f52d..d794231 100644
--- a/plugins/lua/ts_lua_http_intercept.c
+++ b/plugins/lua/ts_lua_http_intercept.c
@@ -406,7 +406,7 @@ ts_lua_flush_wakeup(ts_lua_http_intercept_ctx *ictx)
   ci = &ictx->cinfo;
 
   contp  = TSContCreate(ts_lua_flush_wakeup_handler, ci->mutex);
-  action = TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
+  action = TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
 
   ai = ts_lua_async_create_item(contp, ts_lua_flush_cleanup, (void *)action, ci);
   TSContDataSet(contp, ai);
diff --git a/plugins/lua/ts_lua_misc.c b/plugins/lua/ts_lua_misc.c
index 8da3a16..e12e5a1 100644
--- a/plugins/lua/ts_lua_misc.c
+++ b/plugins/lua/ts_lua_misc.c
@@ -211,7 +211,7 @@ ts_lua_schedule(lua_State *L)
   nci->contp = contp;
   nci->mutex = ci->mutex;
 
-  TSContSchedule(contp, sec * 1000, entry);
+  TSContScheduleOnPool(contp, sec * 1000, entry);
 
   return 0;
 }
@@ -287,7 +287,7 @@ ts_lua_sleep(lua_State *L)
   }
 
   contp  = TSContCreate(ts_lua_sleep_handler, ci->mutex);
-  action = TSContSchedule(contp, sec * 1000, TS_THREAD_POOL_DEFAULT);
+  action = TSContScheduleOnPool(contp, sec * 1000, TS_THREAD_POOL_NET);
 
   ai = ts_lua_async_create_item(contp, ts_lua_sleep_cleanup, (void *)action, ci);
   TSContDataSet(contp, ai);
diff --git a/plugins/regex_revalidate/regex_revalidate.c b/plugins/regex_revalidate/regex_revalidate.c
index 0a697d7..d7ff706 100644
--- a/plugins/regex_revalidate/regex_revalidate.c
+++ b/plugins/regex_revalidate/regex_revalidate.c
@@ -366,7 +366,7 @@ config_handler(TSCont cont, TSEvent event, void *edata)
     if (iptr) {
       free_cont = TSContCreate(free_handler, TSMutexCreate());
       TSContDataSet(free_cont, (void *)iptr);
-      TSContSchedule(free_cont, FREE_TMOUT, TS_THREAD_POOL_TASK);
+      TSContScheduleOnPool(free_cont, FREE_TMOUT, TS_THREAD_POOL_TASK);
     }
   } else {
     TSDebug(LOG_PREFIX, "No Changes");
@@ -379,7 +379,7 @@ config_handler(TSCont cont, TSEvent event, void *edata)
 
   // Don't reschedule for TS_EVENT_MGMT_UPDATE
   if (event == TS_EVENT_TIMEOUT) {
-    TSContSchedule(cont, CONFIG_TMOUT, TS_THREAD_POOL_TASK);
+    TSContScheduleOnPool(cont, CONFIG_TMOUT, TS_THREAD_POOL_TASK);
   }
   return 0;
 }
@@ -562,7 +562,7 @@ TSPluginInit(int argc, const char *argv[])
   TSMgmtUpdateRegister(config_cont, LOG_PREFIX);
 
   if (!disable_timed_reload) {
-    TSContSchedule(config_cont, CONFIG_TMOUT, TS_THREAD_POOL_TASK);
+    TSContScheduleOnPool(config_cont, CONFIG_TMOUT, TS_THREAD_POOL_TASK);
   }
 
   TSDebug(LOG_PREFIX, "Plugin Init Complete");
diff --git a/src/traffic_server/InkAPI.cc b/src/traffic_server/InkAPI.cc
index b92e42c..55a8c13 100644
--- a/src/traffic_server/InkAPI.cc
+++ b/src/traffic_server/InkAPI.cc
@@ -4399,16 +4399,45 @@ TSContDataGet(TSCont contp)
 }
 
 TSAction
-TSContSchedule(TSCont contp, ink_hrtime timeout, TSThreadPool tp)
+TSContSchedule(TSCont contp, TSHRTime timeout)
 {
   sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
 
   FORCE_PLUGIN_SCOPED_MUTEX(contp);
 
-  INKContInternal *i = (INKContInternal *)contp;
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
+    ink_assert(!"not reached");
+  }
+
+  EThread *eth = i->getThreadAffinity();
+  if (eth == nullptr) {
+    return nullptr;
+  }
+
   TSAction action;
+  if (timeout == 0) {
+    action = reinterpret_cast<TSAction>(eth->schedule_imm(i));
+  } else {
+    action = reinterpret_cast<TSAction>(eth->schedule_in(i, HRTIME_MSECONDS(timeout)));
+  }
 
-  if (ink_atomic_increment((int *)&i->m_event_count, 1) < 0) {
+  /* This is a hack. Should be handled in ink_types */
+  action = (TSAction)((uintptr_t)action | 0x1);
+  return action;
+}
+
+TSAction
+TSContScheduleOnPool(TSCont contp, TSHRTime timeout, TSThreadPool tp)
+{
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
     ink_assert(!"not reached");
   }
 
@@ -4416,7 +4445,6 @@ TSContSchedule(TSCont contp, ink_hrtime timeout, TSThreadPool tp)
 
   switch (tp) {
   case TS_THREAD_POOL_NET:
-  case TS_THREAD_POOL_DEFAULT:
     etype = ET_NET;
     break;
   case TS_THREAD_POOL_TASK:
@@ -4439,28 +4467,82 @@ TSContSchedule(TSCont contp, ink_hrtime timeout, TSThreadPool tp)
     break;
   }
 
+  TSAction action;
   if (timeout == 0) {
     action = reinterpret_cast<TSAction>(eventProcessor.schedule_imm(i, etype));
   } else {
     action = reinterpret_cast<TSAction>(eventProcessor.schedule_in(i, HRTIME_MSECONDS(timeout), etype));
   }
 
-  /* This is a hack. SHould be handled in ink_types */
+  /* This is a hack. Should be handled in ink_types */
   action = (TSAction)((uintptr_t)action | 0x1);
   return action;
 }
 
 TSAction
-TSContScheduleEvery(TSCont contp, ink_hrtime every, TSThreadPool tp)
+TSContScheduleOnThread(TSCont contp, TSHRTime timeout, TSEventThread ethread)
 {
+  ink_release_assert(ethread != nullptr);
+
   sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
 
   FORCE_PLUGIN_SCOPED_MUTEX(contp);
 
-  INKContInternal *i = (INKContInternal *)contp;
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
+    ink_assert(!"not reached");
+  }
+
+  EThread *eth = reinterpret_cast<EThread *>(ethread);
+
   TSAction action;
+  if (timeout == 0) {
+    action = reinterpret_cast<TSAction>(eth->schedule_imm(i));
+  } else {
+    action = reinterpret_cast<TSAction>(eth->schedule_in(i, HRTIME_MSECONDS(timeout)));
+  }
 
-  if (ink_atomic_increment((int *)&i->m_event_count, 1) < 0) {
+  /* This is a hack. Should be handled in ink_types */
+  action = (TSAction)((uintptr_t)action | 0x1);
+  return action;
+}
+
+TSAction
+TSContScheduleEvery(TSCont contp, TSHRTime every /* millisecs */)
+{
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
+    ink_assert(!"not reached");
+  }
+
+  EThread *eth = i->getThreadAffinity();
+  if (eth == nullptr) {
+    return nullptr;
+  }
+
+  TSAction action = reinterpret_cast<TSAction>(eth->schedule_every(i, HRTIME_MSECONDS(every)));
+
+  /* This is a hack. Should be handled in ink_types */
+  action = (TSAction)((uintptr_t)action | 0x1);
+  return action;
+}
+
+TSAction
+TSContScheduleEveryOnPool(TSCont contp, TSHRTime every, TSThreadPool tp)
+{
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
     ink_assert(!"not reached");
   }
 
@@ -4468,7 +4550,6 @@ TSContScheduleEvery(TSCont contp, ink_hrtime every, TSThreadPool tp)
 
   switch (tp) {
   case TS_THREAD_POOL_NET:
-  case TS_THREAD_POOL_DEFAULT:
     etype = ET_NET;
     break;
   case TS_THREAD_POOL_TASK:
@@ -4479,15 +4560,81 @@ TSContScheduleEvery(TSCont contp, ink_hrtime every, TSThreadPool tp)
     break;
   }
 
-  action = reinterpret_cast<TSAction>(eventProcessor.schedule_every(i, HRTIME_MSECONDS(every), etype));
+  TSAction action = reinterpret_cast<TSAction>(eventProcessor.schedule_every(i, HRTIME_MSECONDS(every), etype));
+
+  /* This is a hack. Should be handled in ink_types */
+  action = (TSAction)((uintptr_t)action | 0x1);
+  return action;
+}
+
+TSAction
+TSContScheduleEveryOnThread(TSCont contp, TSHRTime every /* millisecs */, TSEventThread ethread)
+{
+  ink_release_assert(ethread != nullptr);
+
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  if (ink_atomic_increment(static_cast<int *>(&i->m_event_count), 1) < 0) {
+    ink_assert(!"not reached");
+  }
+
+  EThread *eth = reinterpret_cast<EThread *>(ethread);
+
+  TSAction action = reinterpret_cast<TSAction>(eth->schedule_every(i, HRTIME_MSECONDS(every)));
 
-  /* This is a hack. SHould be handled in ink_types */
+  /* This is a hack. Should be handled in ink_types */
   action = (TSAction)((uintptr_t)action | 0x1);
   return action;
 }
 
+TSReturnCode
+TSContThreadAffinitySet(TSCont contp, TSEventThread ethread)
+{
+  ink_release_assert(ethread != nullptr);
+
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i       = reinterpret_cast<INKContInternal *>(contp);
+  EThread *thread_affinity = reinterpret_cast<EThread *>(ethread);
+
+  if (i->setThreadAffinity(thread_affinity)) {
+    return TS_SUCCESS;
+  }
+  return TS_ERROR;
+}
+
+TSEventThread
+TSContThreadAffinityGet(TSCont contp)
+{
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  return reinterpret_cast<TSEventThread>(i->getThreadAffinity());
+}
+
+void
+TSContThreadAffinityClear(TSCont contp)
+{
+  sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
+
+  FORCE_PLUGIN_SCOPED_MUTEX(contp);
+
+  INKContInternal *i = reinterpret_cast<INKContInternal *>(contp);
+
+  i->clearThreadAffinity();
+}
+
 TSAction
-TSHttpSchedule(TSCont contp, TSHttpTxn txnp, ink_hrtime timeout)
+TSHttpSchedule(TSCont contp, TSHttpTxn txnp, TSHRTime timeout)
 {
   sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS);
 
@@ -6412,7 +6559,7 @@ TSActionCancel(TSAction actionp)
   Action *a;
   INKContInternal *i;
 
-  /* This is a hack. SHould be handled in ink_types */
+  /* This is a hack. Should be handled in ink_types */
   if ((uintptr_t)actionp & 0x1) {
     a = (Action *)((uintptr_t)actionp - 1);
     i = (INKContInternal *)a->continuation;
diff --git a/src/traffic_server/InkAPITest.cc b/src/traffic_server/InkAPITest.cc
index 811f1be..dcf7a84 100644
--- a/src/traffic_server/InkAPITest.cc
+++ b/src/traffic_server/InkAPITest.cc
@@ -1882,7 +1882,7 @@ cache_handler(TSCont contp, TSEvent event, void *data)
 
       // now waiting for 100ms to make sure the key is
       // written in directory remove the content
-      TSContSchedule(contp, 100, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 100, TS_THREAD_POOL_NET);
     }
 
     return 1;
@@ -2463,7 +2463,7 @@ REGRESSION_TEST(SDK_API_TSActionCancel)(RegressionTest *test, int /* atype ATS_U
 
   TSMutex cont_mutex = TSMutexCreate();
   TSCont contp       = TSContCreate(action_cancel_handler, cont_mutex);
-  TSAction actionp   = TSContSchedule(contp, 10000, TS_THREAD_POOL_DEFAULT);
+  TSAction actionp   = TSContScheduleOnPool(contp, 10000, TS_THREAD_POOL_NET);
 
   TSMutexLock(cont_mutex);
   if (TSActionDone(actionp)) {
@@ -2475,7 +2475,7 @@ REGRESSION_TEST(SDK_API_TSActionCancel)(RegressionTest *test, int /* atype ATS_U
   }
   TSMutexUnlock(cont_mutex);
 
-  TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
 }
 
 //////////////////////////////////////////////
@@ -2594,7 +2594,7 @@ REGRESSION_TEST(SDK_API_TSContDataGet)(RegressionTest *test, int /* atype ATS_UN
 
   TSContDataSet(contp, (void *)my_data);
 
-  TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
 }
 
 //////////////////////////////////////////////
@@ -2633,7 +2633,7 @@ REGRESSION_TEST(SDK_API_TSContMutexGet)(RegressionTest *test, int /* atype ATS_U
 //////////////////////////////////////////////
 //       SDK_API_TSCont
 //
-// Unit Test for API: TSContSchedule
+// Unit Test for API: TSContScheduleOnPool
 //////////////////////////////////////////////
 
 // this is needed for asynchronous APIs
@@ -2649,15 +2649,15 @@ cont_schedule_handler(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */
 {
   if (event == TS_EVENT_IMMEDIATE) {
     // Test Case 1
-    SDK_RPRINT(SDK_ContSchedule_test, "TSContSchedule", "TestCase1", TC_PASS, "ok");
+    SDK_RPRINT(SDK_ContSchedule_test, "TSContScheduleOnPool", "TestCase1", TC_PASS, "ok");
     tc1_count++;
   } else if (event == TS_EVENT_TIMEOUT) {
     // Test Case 2
-    SDK_RPRINT(SDK_ContSchedule_test, "TSContSchedule", "TestCase2", TC_PASS, "ok");
+    SDK_RPRINT(SDK_ContSchedule_test, "TSContScheduleOnPool", "TestCase2", TC_PASS, "ok");
     tc2_count++;
   } else {
     // If we receive a bad event, it's a failure
-    SDK_RPRINT(SDK_ContSchedule_test, "TSContSchedule", "TestCase1|2", TC_FAIL, "received unexpected event number %d", event);
+    SDK_RPRINT(SDK_ContSchedule_test, "TSContScheduleOnPool", "TestCase1|2", TC_FAIL, "received unexpected event number %d", event);
     *SDK_ContSchedule_pstatus = REGRESSION_TEST_FAILED;
     return 0;
   }
@@ -3035,10 +3035,10 @@ REGRESSION_TEST(SDK_API_TSContSchedule)(RegressionTest *test, int /* atype ATS_U
   TSCont contp2 = TSContCreate(cont_schedule_handler, TSMutexCreate());
 
   // Test Case 1: schedule immediate
-  TSContSchedule(contp, 0, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
 
   // Test Case 2: schedule in 10ms
-  TSContSchedule(contp2, 10, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(contp2, 10, TS_THREAD_POOL_NET);
 }
 
 //////////////////////////////////////////////////////////////////////////////
@@ -3507,7 +3507,7 @@ mytest_handler(TSCont contp, TSEvent event, void *data)
   case TS_EVENT_TIMEOUT:
     /* Browser still waiting the response ? */
     if (test->browser->status == REQUEST_INPROGRESS) {
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
     }
     /* Browser got the response. test is over. clean up */
     else {
@@ -3600,7 +3600,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpHookAdd)(RegressionTest *test, int /* atyp
 
   /* Wait until transaction is done */
   if (socktest->browser->status == REQUEST_INPROGRESS) {
-    TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+    TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
   }
 
   return;
@@ -6443,7 +6443,7 @@ REGRESSION_TEST(SDK_API_TSTextLog)(RegressionTest *test, int /* atype ATS_UNUSED
   data->log              = log;
   TSContDataSet(log_test_cont, data);
 
-  TSContSchedule(log_test_cont, 6000, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(log_test_cont, 6000, TS_THREAD_POOL_NET);
   return;
 }
 
@@ -7000,7 +7000,7 @@ ssn_handler(TSCont contp, TSEvent event, void *edata)
   case TS_EVENT_TIMEOUT:
     /* Browser still waiting the response ? */
     if (data->browser->status == REQUEST_INPROGRESS) {
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
     }
     /* Browser got the response. test is over. clean up */
     else {
@@ -7085,7 +7085,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpSsn)(RegressionTest *test, int /* atype AT
 
   /* Wait until transaction is done */
   if (socktest->browser->status == REQUEST_INPROGRESS) {
-    TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+    TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
   }
 
   return;
@@ -7244,13 +7244,13 @@ parent_proxy_handler(TSCont contp, TSEvent event, void *edata)
       if (ptest->configured) {
         // If we are still in progress, reschedule.
         rprintf(ptest->regtest, "waiting for response\n");
-        TSContSchedule(contp, 100, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 100, TS_THREAD_POOL_NET);
         break;
       }
 
       if (!ptest->parent_routing_enabled()) {
         rprintf(ptest->regtest, "waiting for configuration\n");
-        TSContSchedule(contp, 100, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 100, TS_THREAD_POOL_NET);
         break;
       }
 
@@ -7330,7 +7330,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpParentProxySet_Fail)(RegressionTest *test,
   ptest->os = synserver_create(SYNSERVER_LISTEN_PORT, TSContCreate(synserver_vc_refuse, TSMutexCreate()));
   synserver_start(ptest->os);
 
-  TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
 }
 
 EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpParentProxySet_Success)(RegressionTest *test, int level, int *pstatus)
@@ -7362,7 +7362,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpParentProxySet_Success)(RegressionTest *te
   ptest->os = synserver_create(SYNSERVER_LISTEN_PORT, TSContCreate(synserver_vc_accept, TSMutexCreate()));
   synserver_start(ptest->os);
 
-  TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
 }
 
 /////////////////////////////////////////////////////
@@ -7485,12 +7485,12 @@ cache_hook_handler(TSCont contp, TSEvent event, void *edata)
     /* Browser still waiting the response ? */
     if (data->first_time == true) {
       if (data->browser1->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
     } else {
       if (data->browser2->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
     }
@@ -7508,7 +7508,7 @@ cache_hook_handler(TSCont contp, TSEvent event, void *edata)
         /* Send another similar client request */
         synclient_txn_send_request(data->browser2, data->request);
         ink_assert(REQUEST_INPROGRESS == data->browser2->status);
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
 
@@ -7577,7 +7577,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnCache)(RegressionTest *test, int /* aty
   synclient_txn_send_request(socktest->browser1, socktest->request);
 
   /* Wait until transaction is done */
-  TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
 
   return;
 }
@@ -7951,37 +7951,37 @@ transform_hook_handler(TSCont contp, TSEvent event, void *edata)
     switch (data->req_no) {
     case 1:
       if (data->browser1->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
       data->req_no++;
       Debug(UTDBG_TAG "_transform", "Running Browser 2");
       synclient_txn_send_request(data->browser2, data->request2);
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
       return 0;
     case 2:
       if (data->browser2->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
       data->req_no++;
       Debug(UTDBG_TAG "_transform", "Running Browser 3");
       synclient_txn_send_request(data->browser3, data->request1);
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
       return 0;
     case 3:
       if (data->browser3->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
       data->req_no++;
       Debug(UTDBG_TAG "_transform", "Running Browser 4");
       synclient_txn_send_request(data->browser4, data->request2);
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
       return 0;
     case 4:
       if (data->browser4->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
       synserver_delete(data->os);
@@ -8120,7 +8120,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnTransform)(RegressionTest *test, int /*
   // synclient_txn_send_request(socktest->browser2, socktest->request2);
 
   /* Wait until transaction is done */
-  TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
 
   return;
 }
@@ -8232,12 +8232,12 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
     /* Browser still waiting the response ? */
     if (data->first_time == true) {
       if ((data->browser1->status == REQUEST_INPROGRESS) || (data->browser2->status == REQUEST_INPROGRESS)) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
     } else {
       if (data->browser3->status == REQUEST_INPROGRESS) {
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
     }
@@ -8257,7 +8257,7 @@ altinfo_hook_handler(TSCont contp, TSEvent event, void *edata)
 
         /* Register to HTTP hooks that are called in case of alternate selection */
         TSHttpHookAdd(TS_HTTP_SELECT_ALT_HOOK, contp);
-        TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+        TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
         return 0;
       }
 
@@ -8336,7 +8336,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpAltInfo)(RegressionTest *test, int /* atyp
   synclient_txn_send_request(socktest->browser2, socktest->request2);
 
   /* Wait until transaction is done */
-  TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
 
   return;
 }
@@ -8425,7 +8425,7 @@ cont_test_handler(TSCont contp, TSEvent event, void *edata)
     /* Browser still waiting the response ? */
     if (data->browser->status == REQUEST_INPROGRESS) {
       TSDebug(UTDBG_TAG, "Browser still waiting response...");
-      TSContSchedule(contp, 25, TS_THREAD_POOL_DEFAULT);
+      TSContScheduleOnPool(contp, 25, TS_THREAD_POOL_NET);
     }
     /* Browser got the response */
     else {
@@ -8517,7 +8517,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectIntercept)(RegressionTest *test,
   synclient_txn_send_request_to_vc(data->browser, data->request, data->vc);
 
   /* Wait until transaction is done */
-  TSContSchedule(cont_test, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont_test, 25, TS_THREAD_POOL_NET);
 
   return;
 }
@@ -8556,7 +8556,7 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectServerIntercept)(RegressionTest *
   synclient_txn_send_request_to_vc(data->browser, data->request, data->vc);
 
   /* Wait until transaction is done */
-  TSContSchedule(cont_test, 25, TS_THREAD_POOL_DEFAULT);
+  TSContScheduleOnPool(cont_test, 25, TS_THREAD_POOL_NET);
 
   return;
 }
diff --git a/src/traffic_server/InkIOCoreAPI.cc b/src/traffic_server/InkIOCoreAPI.cc
index d3dc3bf..221aa36 100644
--- a/src/traffic_server/InkIOCoreAPI.cc
+++ b/src/traffic_server/InkIOCoreAPI.cc
@@ -227,6 +227,12 @@ TSThreadSelf(void)
   return ithread;
 }
 
+TSEventThread
+TSEventThreadSelf(void)
+{
+  return reinterpret_cast<TSEventThread>(this_event_thread());
+}
+
 ////////////////////////////////////////////////////////////////////
 //
 // Mutexes
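
TSEventThreadSelf() complements TSThreadSelf(): it returns the calling
event thread as a TSEventThread, the handle type accepted by the new
TSContScheduleOnThread() and TSContThreadAffinity*() APIs. A minimal
sketch (hypothetical handler name, assuming it runs on an event thread)
of pinning a continuation to the thread it first ran on:

    static int
    pin_handler(TSCont contp, TSEvent event, void *edata)
    {
      if (TSContThreadAffinityGet(contp) == nullptr) {
        // First run: remember the current event thread ...
        TSContThreadAffinitySet(contp, TSEventThreadSelf());
        // ... so this 100 ms reschedule comes back to the same thread.
        TSContSchedule(contp, 100);
      }
      return 0;
    }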
diff --git a/src/tscpp/api/AsyncTimer.cc b/src/tscpp/api/AsyncTimer.cc
index 9fab335..9387409 100644
--- a/src/tscpp/api/AsyncTimer.cc
+++ b/src/tscpp/api/AsyncTimer.cc
@@ -56,7 +56,7 @@ handleTimerEvent(TSCont cont, TSEvent event, void *edata)
     state->initial_timer_action_ = nullptr; // mark it so that it won't be canceled later on
     if (state->type_ == AsyncTimer::TYPE_PERIODIC) {
       LOG_DEBUG("Scheduling periodic event now");
-      state->periodic_timer_action_ = TSContScheduleEvery(state->cont_, state->period_in_ms_, state->thread_pool_);
+      state->periodic_timer_action_ = TSContScheduleEveryOnPool(state->cont_, state->period_in_ms_, state->thread_pool_);
     }
   }
   if (!state->dispatch_controller_->dispatch()) {
@@ -88,10 +88,10 @@ AsyncTimer::run()
   }
   if (one_off_timeout_in_ms) {
     LOG_DEBUG("Scheduling initial/one-off event");
-    state_->initial_timer_action_ = TSContSchedule(state_->cont_, one_off_timeout_in_ms, state_->thread_pool_);
+    state_->initial_timer_action_ = TSContScheduleOnPool(state_->cont_, one_off_timeout_in_ms, state_->thread_pool_);
   } else if (regular_timeout_in_ms) {
     LOG_DEBUG("Scheduling regular timer events");
-    state_->periodic_timer_action_ = TSContScheduleEvery(state_->cont_, regular_timeout_in_ms, state_->thread_pool_);
+    state_->periodic_timer_action_ = TSContScheduleEveryOnPool(state_->cont_, regular_timeout_in_ms, state_->thread_pool_);
   }
 }
 
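AsyncTimer's behavior is unchanged; only the API names are. With an
explicit pool, the one-off form is TSContScheduleOnPool() and the
periodic form is TSContScheduleEveryOnPool(); both return a TSAction
that can be cancelled. A sketch (cont and the 500 ms values are
illustrative):

    TSAction once  = TSContScheduleOnPool(cont, 500, TS_THREAD_POOL_TASK);      // fires once
    TSAction every = TSContScheduleEveryOnPool(cont, 500, TS_THREAD_POOL_TASK); // fires repeatedly
    TSActionCancel(every); // stop the periodic events before the next firing
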
diff --git a/src/tscpp/api/InterceptPlugin.cc b/src/tscpp/api/InterceptPlugin.cc
index db72a59..8ccc5a2 100644
--- a/src/tscpp/api/InterceptPlugin.cc
+++ b/src/tscpp/api/InterceptPlugin.cc
@@ -359,7 +359,7 @@ handleEvents(TSCont cont, TSEvent pristine_event, void *pristine_edata)
       state->saved_event_ = event;
       state->saved_edata_ = edata;
     }
-    state->timeout_action_ = TSContSchedule(cont, 1, TS_THREAD_POOL_DEFAULT);
+    state->timeout_action_ = TSContScheduleOnPool(cont, 1, TS_THREAD_POOL_NET);
     return 0;
   }
   if (event == TS_EVENT_TIMEOUT) { // we have a saved event to restore
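
This is the same mechanical migration applied throughout the commit:
call sites that passed TS_THREAD_POOL_DEFAULT to the old three-argument
TSContSchedule() now name a pool explicitly through TSContScheduleOnPool(),
most often TS_THREAD_POOL_NET. Sketch of the pattern (cont and the delay
are placeholders):

    // before this commit:
    //   TSContSchedule(cont, 25, TS_THREAD_POOL_DEFAULT);
    // after, either name the pool explicitly ...
    TSContScheduleOnPool(cont, 25, TS_THREAD_POOL_NET);
    // ... or use the new two-argument form, which follows the
    // continuation's thread affinity (see cont_schedule.cc below).
    TSContSchedule(cont, 25);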
diff --git a/tests/gold_tests/cont_schedule/gold/http_200.gold b/tests/gold_tests/cont_schedule/gold/http_200.gold
new file mode 100644
index 0000000..f3752f1
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/gold/http_200.gold
@@ -0,0 +1,9 @@
+``
+< HTTP/1.1 200 OK
+< Date: ``
+< Age: ``
+< Transfer-Encoding: chunked
+< Proxy-Connection: keep-alive
+< Server: ATS/``
+< 
+``
diff --git a/tests/gold_tests/cont_schedule/gold/schedule.gold b/tests/gold_tests/cont_schedule/gold/schedule.gold
new file mode 100644
index 0000000..7b5d1f6
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/gold/schedule.gold
@@ -0,0 +1,4 @@
+``
+``(TSContSchedule_test.check) pass [should be the same thread]
+``(TSContSchedule_test.check) pass [should not be the same thread]
+``
diff --git a/tests/gold_tests/cont_schedule/gold/schedule_on_pool.gold b/tests/gold_tests/cont_schedule/gold/schedule_on_pool.gold
new file mode 100644
index 0000000..42328e0
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/gold/schedule_on_pool.gold
@@ -0,0 +1,9 @@
+``
+``ET_NET``TSContScheduleOnPool handler 1``
+``ET_NET``TSContScheduleOnPool handler 1``
+``(TSContSchedule_test.check) pass [should not be the same thread]
+``ET_TASK``TSContScheduleOnPool handler 2``
+``(TSContSchedule_test.check) pass [should not be the same thread]
+``ET_TASK``TSContScheduleOnPool handler 2``
+``(TSContSchedule_test.check) pass [should not be the same thread]
+``
diff --git a/tests/gold_tests/cont_schedule/gold/schedule_on_thread.gold b/tests/gold_tests/cont_schedule/gold/schedule_on_thread.gold
new file mode 100644
index 0000000..7b5d1f6
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/gold/schedule_on_thread.gold
@@ -0,0 +1,4 @@
+``
+``(TSContSchedule_test.check) pass [should be the same thread]
+``(TSContSchedule_test.check) pass [should not be the same thread]
+``
diff --git a/tests/gold_tests/cont_schedule/gold/thread_affinity.gold b/tests/gold_tests/cont_schedule/gold/thread_affinity.gold
new file mode 100644
index 0000000..b81b4f5
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/gold/thread_affinity.gold
@@ -0,0 +1,5 @@
+``
+``pass [affinity thread is null]
+``pass [affinity thread is set]
+``pass [affinity thread is cleared]
+``
diff --git a/tests/gold_tests/cont_schedule/schedule.test.py b/tests/gold_tests/cont_schedule/schedule.test.py
new file mode 100644
index 0000000..e2867ef
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/schedule.test.py
@@ -0,0 +1,72 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+import os
+
+Test.Summary = 'Test TSContSchedule API'
+Test.SkipUnless(Condition.HasProgram('curl', 'Curl needs to be installed on the system for this test to work'))
+
+Test.ContinueOnFail = True
+
+# Define default ATS
+ts = Test.MakeATSProcess('ts')
+server = Test.MakeOriginServer('server')
+
+Test.testName = ''
+request_header = {
+    'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+response_header = {
+    'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+server.addResponse("sessionfile.log", request_header, response_header)
+
+ts.Disk.records_config.update({
+    'proxy.config.exec_thread.autoconfig': 0,
+    'proxy.config.exec_thread.autoconfig.scale': 1.5,
+    'proxy.config.exec_thread.limit': 32,
+    'proxy.config.accept_threads': 1,
+    'proxy.config.task_threads': 2,
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'TSContSchedule_test'
+})
+ts.Disk.remap_config.AddLine(
+    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Load plugin
+Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'cont_schedule.cc'), ts)
+
+# www.example.com Host
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
+tr.StillRunningAfter = ts
+tr.StillRunningAfter = server
+
+# Check Plugin Results
+ts.Streams.All = "gold/schedule.gold"
+ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
diff --git a/tests/gold_tests/cont_schedule/schedule_on_pool.test.py b/tests/gold_tests/cont_schedule/schedule_on_pool.test.py
new file mode 100644
index 0000000..9fb5419
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/schedule_on_pool.test.py
@@ -0,0 +1,72 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+import os
+
+Test.Summary = 'Test TSContScheduleOnPool API'
+Test.SkipUnless(Condition.HasProgram('curl', 'Curl needs to be installed on the system for this test to work'))
+
+Test.ContinueOnFail = True
+
+# Define default ATS
+ts = Test.MakeATSProcess('ts')
+server = Test.MakeOriginServer('server')
+
+Test.testName = ''
+request_header = {
+    'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+response_header = {
+    'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+server.addResponse("sessionfile.log", request_header, response_header)
+
+ts.Disk.records_config.update({
+    'proxy.config.exec_thread.autoconfig': 0,
+    'proxy.config.exec_thread.autoconfig.scale': 1.5,
+    'proxy.config.exec_thread.limit': 32,
+    'proxy.config.accept_threads': 1,
+    'proxy.config.task_threads': 2,
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'TSContSchedule_test'
+})
+ts.Disk.remap_config.AddLine(
+    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Load plugin
+Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'cont_schedule.cc'), ts, 'pool')
+
+# www.example.com Host
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
+tr.StillRunningAfter = ts
+tr.StillRunningAfter = server
+
+# Check Plugin Results
+ts.Streams.All = "gold/schedule_on_pool.gold"
+ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
diff --git a/tests/gold_tests/cont_schedule/schedule_on_thread.test.py b/tests/gold_tests/cont_schedule/schedule_on_thread.test.py
new file mode 100644
index 0000000..5b5ab5f
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/schedule_on_thread.test.py
@@ -0,0 +1,72 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+import os
+
+Test.Summary = 'Test TSContScheduleOnThread API'
+Test.SkipUnless(Condition.HasProgram('curl', 'Curl needs to be installed on the system for this test to work'))
+
+Test.ContinueOnFail = True
+
+# Define default ATS
+ts = Test.MakeATSProcess('ts')
+server = Test.MakeOriginServer('server')
+
+Test.testName = ''
+request_header = {
+    'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+response_header = {
+    'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+server.addResponse("sessionfile.log", request_header, response_header)
+
+ts.Disk.records_config.update({
+    'proxy.config.exec_thread.autoconfig': 0,
+    'proxy.config.exec_thread.autoconfig.scale': 1.5,
+    'proxy.config.exec_thread.limit': 32,
+    'proxy.config.accept_threads': 1,
+    'proxy.config.task_threads': 2,
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'TSContSchedule_test'
+})
+ts.Disk.remap_config.AddLine(
+    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Load plugin
+Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'cont_schedule.cc'), ts, 'thread')
+
+# www.example.com Host
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
+tr.StillRunningAfter = ts
+tr.StillRunningAfter = server
+
+# Check Plugin Results
+ts.Streams.All = "gold/schedule_on_thread.gold"
+ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
diff --git a/tests/gold_tests/cont_schedule/thread_affinity.test.py b/tests/gold_tests/cont_schedule/thread_affinity.test.py
new file mode 100644
index 0000000..8b91d5a
--- /dev/null
+++ b/tests/gold_tests/cont_schedule/thread_affinity.test.py
@@ -0,0 +1,72 @@
+'''
+'''
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+import os
+
+Test.Summary = 'Test TSContThreadAffinity APIs'
+Test.SkipUnless(Condition.HasProgram('curl', 'Curl needs to be installed on the system for this test to work'))
+
+Test.ContinueOnFail = True
+
+# Define default ATS
+ts = Test.MakeATSProcess('ts')
+server = Test.MakeOriginServer('server')
+
+Test.testName = ''
+request_header = {
+    'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+response_header = {
+    'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
+    'timestamp': '1469733493.993',
+    'body': ''
+}
+server.addResponse("sessionfile.log", request_header, response_header)
+
+ts.Disk.records_config.update({
+    'proxy.config.exec_thread.autoconfig': 0,
+    'proxy.config.exec_thread.autoconfig.scale': 1.5,
+    'proxy.config.exec_thread.limit': 32,
+    'proxy.config.accept_threads': 1,
+    'proxy.config.task_threads': 2,
+    'proxy.config.diags.debug.enabled': 1,
+    'proxy.config.diags.debug.tags': 'TSContSchedule_test'
+})
+ts.Disk.remap_config.AddLine(
+    'map / http://127.0.0.1:{0}'.format(server.Variables.Port)
+)
+
+# Load plugin
+Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'cont_schedule.cc'), ts, 'affinity')
+
+# www.example.com Host
+tr = Test.AddTestRun()
+tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
+tr.Processes.Default.ReturnCode = 0
+tr.Processes.Default.StartBefore(ts)
+tr.Processes.Default.StartBefore(server)
+tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
+tr.StillRunningAfter = ts
+tr.StillRunningAfter = server
+
+# Check Plugin Results
+ts.Streams.All = "gold/thread_affinity.gold"
+ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
diff --git a/tests/tools/plugins/cont_schedule.cc b/tests/tools/plugins/cont_schedule.cc
new file mode 100644
index 0000000..accf3db
--- /dev/null
+++ b/tests/tools/plugins/cont_schedule.cc
@@ -0,0 +1,327 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring> // for strlen, strncmp
+#include <cstdlib> // for abort
+#include <ts/ts.h> // plugin API, TSDebug
+
+// debug messages viewable by setting 'proxy.config.diags.debug.tags'
+// in 'records.config'
+
+// debug tags for the init, scheduling, handler, and check phases
+static const char DEBUG_TAG_INIT[] = "TSContSchedule_test.init";
+static const char DEBUG_TAG_SCHD[] = "TSContSchedule_test.schedule";
+static const char DEBUG_TAG_HDL[]  = "TSContSchedule_test.handler";
+static const char DEBUG_TAG_CHK[]  = "TSContSchedule_test.check";
+
+// plugin registration info
+static char plugin_name[]   = "TSContSchedule_test";
+static char vendor_name[]   = "apache";
+static char support_email[] = "duke8253@apache.org";
+
+static int test_flag = 0;
+
+static TSEventThread test_thread = nullptr;
+static TSThread check_thread     = nullptr;
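+
+// test_thread records the event thread chosen by the first handler so a
+// second continuation can be pinned to it; check_thread records which
+// thread a handler actually ran on, for later same/different comparisons.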
+
+static int TSContSchedule_handler_1(TSCont contp, TSEvent event, void *edata);
+static int TSContSchedule_handler_2(TSCont contp, TSEvent event, void *edata);
+static int TSContScheduleOnPool_handler_1(TSCont contp, TSEvent event, void *edata);
+static int TSContScheduleOnPool_handler_2(TSCont contp, TSEvent event, void *edata);
+static int TSContScheduleOnThread_handler_1(TSCont contp, TSEvent event, void *edata);
+static int TSContScheduleOnThread_handler_2(TSCont contp, TSEvent event, void *edata);
+static int TSContThreadAffinity_handler(TSCont contp, TSEvent event, void *edata);
+
+static int
+TSContSchedule_handler_1(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContSchedule handler 1 thread [%p]", TSThreadSelf());
+  if (test_thread == nullptr) {
+    test_thread = TSEventThreadSelf();
+
+    TSCont contp_new = TSContCreate(TSContSchedule_handler_2, TSMutexCreate());
+
+    if (contp_new == nullptr) {
+      TSDebug(DEBUG_TAG_HDL, "[%s] could not create continuation", plugin_name);
+      abort();
+    } else {
+      TSDebug(DEBUG_TAG_HDL, "[%s] scheduling continuation", plugin_name);
+      TSContThreadAffinitySet(contp_new, test_thread);
+      TSContSchedule(contp_new, 0);
+      TSContSchedule(contp_new, 100);
+    }
+  } else if (check_thread == nullptr) {
+    TSDebug(DEBUG_TAG_CHK, "fail [schedule delay not applied]");
+  } else {
+    if (check_thread != TSThreadSelf()) {
+      TSDebug(DEBUG_TAG_CHK, "pass [should not be the same thread]");
+    } else {
+      TSDebug(DEBUG_TAG_CHK, "fail [on the same thread]");
+    }
+  }
+  return 0;
+}
+
+static int
+TSContSchedule_handler_2(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContSchedule handler 2 thread [%p]", TSThreadSelf());
+  if (check_thread == nullptr) {
+    check_thread = TSThreadSelf();
+  } else if (check_thread == TSThreadSelf()) {
+    TSDebug(DEBUG_TAG_CHK, "pass [should be the same thread]");
+  } else {
+    TSDebug(DEBUG_TAG_CHK, "fail [not the same thread]");
+  }
+  return 0;
+}
+
+static int
+TSContScheduleOnPool_handler_1(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContScheduleOnPool handler 1 thread [%p]", TSThreadSelf());
+  if (check_thread == nullptr) {
+    check_thread = TSThreadSelf();
+  } else {
+    if (check_thread != TSThreadSelf()) {
+      TSDebug(DEBUG_TAG_CHK, "pass [should not be the same thread]");
+    } else {
+      TSDebug(DEBUG_TAG_CHK, "fail [on the same thread]");
+    }
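+    // Remember this thread so the next invocation, possibly from another
+    // pool, has something to compare against.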
+    check_thread = TSThreadSelf();
+  }
+  return 0;
+}
+
+static int
+TSContScheduleOnPool_handler_2(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContScheduleOnPool handler 2 thread [%p]", TSThreadSelf());
+  if (check_thread == nullptr) {
+    check_thread = TSThreadSelf();
+  } else {
+    if (check_thread != TSThreadSelf()) {
+      TSDebug(DEBUG_TAG_CHK, "pass [should not be the same thread]");
+    } else {
+      TSDebug(DEBUG_TAG_CHK, "fail [on the same thread]");
+    }
+    check_thread = TSThreadSelf();
+  }
+  return 0;
+}
+
+static int
+TSContScheduleOnThread_handler_1(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContScheduleOnThread handler 1 thread [%p]", TSThreadSelf());
+  if (test_thread == nullptr) {
+    test_thread = TSEventThreadSelf();
+
+    TSCont contp_new = TSContCreate(TSContScheduleOnThread_handler_2, TSMutexCreate());
+
+    if (contp_new == nullptr) {
+      TSDebug(DEBUG_TAG_HDL, "[%s] could not create continuation", plugin_name);
+      abort();
+    } else {
+      TSDebug(DEBUG_TAG_HDL, "[%s] scheduling continuation", plugin_name);
+      TSContScheduleOnThread(contp_new, 0, test_thread);
+      TSContScheduleOnThread(contp_new, 100, test_thread);
+    }
+  } else if (check_thread == nullptr) {
+    TSDebug(DEBUG_TAG_CHK, "fail [schedule delay not applied]");
+  } else {
+    if (check_thread != TSThreadSelf()) {
+      TSDebug(DEBUG_TAG_CHK, "pass [should not be the same thread]");
+    } else {
+      TSDebug(DEBUG_TAG_CHK, "fail [on the same thread]");
+    }
+  }
+  return 0;
+}
+
+static int
+TSContScheduleOnThread_handler_2(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContScheduleOnThread handler 2 thread [%p]", TSThreadSelf());
+  if (check_thread == nullptr) {
+    check_thread = TSThreadSelf();
+  } else if (check_thread == TSThreadSelf()) {
+    TSDebug(DEBUG_TAG_CHK, "pass [should be the same thread]");
+  } else {
+    TSDebug(DEBUG_TAG_CHK, "fail [not the same thread]");
+  }
+  return 0;
+}
+
+static int
+TSContThreadAffinity_handler(TSCont contp, TSEvent event, void *edata)
+{
+  TSDebug(DEBUG_TAG_HDL, "TSContThreadAffinity handler thread [%p]", TSThreadSelf());
+
+  test_thread = TSEventThreadSelf();
+
+  if (TSContThreadAffinityGet(contp) == nullptr) {
+    TSDebug(DEBUG_TAG_CHK, "pass [affinity thread is null]");
+    TSContThreadAffinitySet(contp, TSEventThreadSelf());
+    if (TSContThreadAffinityGet(contp) == test_thread) {
+      TSDebug(DEBUG_TAG_CHK, "pass [affinity thread is set]");
+      TSContThreadAffinityClear(contp);
+      if (TSContThreadAffinityGet(contp) == nullptr) {
+        TSDebug(DEBUG_TAG_CHK, "pass [affinity thread is cleared]");
+      } else {
+        TSDebug(DEBUG_TAG_CHK, "fail [affinity thread is not cleared]");
+      }
+    } else {
+      TSDebug(DEBUG_TAG_CHK, "fail [affinity thread is not set]");
+    }
+  } else {
+    TSDebug(DEBUG_TAG_CHK, "fail [affinity thread is not null]");
+  }
+
+  return 0;
+}
+
+void
+TSContSchedule_test()
+{
+  TSCont contp = TSContCreate(TSContSchedule_handler_1, TSMutexCreate());
+
+  if (contp == nullptr) {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] scheduling continuation", plugin_name);
+    TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
+    TSContScheduleOnPool(contp, 200, TS_THREAD_POOL_NET);
+  }
+}
+
+void
+TSContScheduleOnPool_test()
+{
+  TSCont contp_1 = TSContCreate(TSContScheduleOnPool_handler_1, TSMutexCreate());
+  TSCont contp_2 = TSContCreate(TSContScheduleOnPool_handler_2, TSMutexCreate());
+
+  if (contp_1 == nullptr || contp_2 == nullptr) {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] scheduling continuation", plugin_name);
+    TSContScheduleOnPool(contp_1, 0, TS_THREAD_POOL_NET);
+    TSContScheduleOnPool(contp_1, 100, TS_THREAD_POOL_NET);
+    TSContScheduleOnPool(contp_2, 200, TS_THREAD_POOL_TASK);
+    TSContScheduleOnPool(contp_2, 300, TS_THREAD_POOL_TASK);
+  }
+}
+
+void
+TSContScheduleOnThread_test()
+{
+  TSCont contp = TSContCreate(TSContScheduleOnThread_handler_1, TSMutexCreate());
+
+  if (contp == nullptr) {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] scheduling continuation", plugin_name);
+    TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
+    TSContScheduleOnPool(contp, 200, TS_THREAD_POOL_NET);
+  }
+}
+
+void
+TSContThreadAffinity_test()
+{
+  TSCont contp = TSContCreate(TSContThreadAffinity_handler, TSMutexCreate());
+
+  if (contp == nullptr) {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] could not create continuation", plugin_name);
+    abort();
+  } else {
+    TSDebug(DEBUG_TAG_SCHD, "[%s] scheduling continuation", plugin_name);
+    TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
+  }
+}
+
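+// Kick off the selected test only once TASK threads are ready, so that
+// scheduling on TS_THREAD_POOL_TASK is valid.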
+static int
+LifecycleHookTracer(TSCont contp, TSEvent event, void *edata)
+{
+  if (event == TS_EVENT_LIFECYCLE_TASK_THREADS_READY) {
+    switch (test_flag) {
+    case 1:
+      TSContSchedule_test();
+      break;
+    case 2:
+      TSContScheduleOnPool_test();
+      break;
+    case 3:
+      TSContScheduleOnThread_test();
+      break;
+    case 4:
+      TSContThreadAffinity_test();
+      break;
+    default:
+      break;
+    }
+  }
+  return 0;
+}
+
+void
+TSPluginInit(int argc, const char *argv[])
+{
+  if (argc == 1) {
+    TSDebug(DEBUG_TAG_INIT, "initializing plugin for testing TSContSchedule");
+    test_flag = 1;
+  } else if (argc == 2) {
+    size_t len = strlen(argv[1]);
+    if (len == 4 && strncmp(argv[1], "pool", 4) == 0) {
+      TSDebug(DEBUG_TAG_INIT, "initializing plugin for testing TSContScheduleOnPool");
+      test_flag = 2;
+    } else if (len == 6 && strncmp(argv[1], "thread", 6) == 0) {
+      TSDebug(DEBUG_TAG_INIT, "initializing plugin for testing TSContScheduleOnThread");
+      test_flag = 3;
+    } else if (len == 8 && strncmp(argv[1], "affinity", 8) == 0) {
+      TSDebug(DEBUG_TAG_INIT, "initializing plugin for testing TSContThreadAffinity");
+      test_flag = 4;
+    } else {
+      goto Lerror;
+    }
+  } else {
+    goto Lerror;
+  }
+
+  TSPluginRegistrationInfo info;
+
+  info.plugin_name   = plugin_name;
+  info.vendor_name   = vendor_name;
+  info.support_email = support_email;
+
+  if (TSPluginRegister(&info) != TS_SUCCESS) {
+    TSDebug(DEBUG_TAG_INIT, "[%s] plugin registration failed", plugin_name);
+    abort();
+  }
+
+  TSLifecycleHookAdd(TS_LIFECYCLE_TASK_THREADS_READY_HOOK, TSContCreate(LifecycleHookTracer, TSMutexCreate()));
+
+  return;
+
+Lerror:
+  TSDebug(DEBUG_TAG_INIT, "[%s] plugin invalid argument", plugin_name);
+  abort();
+}
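
The test drivers above select a mode through the plugin's first
argument; in a hand-written plugin.config the equivalent would be
(plugin path illustrative):

    # argument is one of: (nothing), pool, thread, affinity
    cont_schedule.so pool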
diff --git a/tests/tools/plugins/continuations_verify.cc b/tests/tools/plugins/continuations_verify.cc
index d452968..079c049 100644
--- a/tests/tools/plugins/continuations_verify.cc
+++ b/tests/tools/plugins/continuations_verify.cc
@@ -56,7 +56,7 @@ handle_msg(TSCont contp, TSEvent event, void *edata)
     // threads do not get their thread local copy of the stats
     // merged in.  So externally, test.done was stuck at 0 without
     // the Schedule to a NET thread
-    TSContSchedule(contp, 0, TS_THREAD_POOL_NET);
+    TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
   } else {
     TSDebug(DEBUG_TAG_MSG, "event %d", event);
     TSStatIntIncrement(stat_test_done, 1);
diff --git a/tests/tools/plugins/ssl_hook_test.cc b/tests/tools/plugins/ssl_hook_test.cc
index f7fde1c..a863c8c 100644
--- a/tests/tools/plugins/ssl_hook_test.cc
+++ b/tests/tools/plugins/ssl_hook_test.cc
@@ -73,7 +73,7 @@ CB_Pre_Accept_Delay(TSCont cont, TSEvent event, void *edata)
   TSContDataSet(cb, ssl_vc);
 
   // Schedule to reenable in a bit
-  TSContSchedule(cb, 2000, TS_THREAD_POOL_NET);
+  TSContScheduleOnPool(cb, 2000, TS_THREAD_POOL_NET);
 
   return TS_SUCCESS;
 }
@@ -108,7 +108,7 @@ CB_out_start_delay(TSCont cont, TSEvent event, void *edata)
   TSContDataSet(cb, ssl_vc);
 
   // Schedule to reenable in a bit
-  TSContSchedule(cb, 2000, TS_THREAD_POOL_NET);
+  TSContScheduleOnPool(cb, 2000, TS_THREAD_POOL_NET);
 
   return TS_SUCCESS;
 }
@@ -183,7 +183,7 @@ CB_Cert(TSCont cont, TSEvent event, void *edata)
   TSContDataSet(cb, ssl_vc);
 
   // Schedule to reenable in a bit
-  TSContSchedule(cb, 2000, TS_THREAD_POOL_NET);
+  TSContScheduleOnPool(cb, 2000, TS_THREAD_POOL_NET);
 
   return TS_SUCCESS;
 }
diff --git a/tests/tools/plugins/ssntxnorder_verify.cc b/tests/tools/plugins/ssntxnorder_verify.cc
index 9ff16ae..fd0a2ce 100644
--- a/tests/tools/plugins/ssntxnorder_verify.cc
+++ b/tests/tools/plugins/ssntxnorder_verify.cc
@@ -249,7 +249,7 @@ handle_order(TSCont contp, TSEvent event, void *edata)
     if (!strcmp(ctl_tag, msgp->tag) && strncmp(ctl_dump, reinterpret_cast<const char *>(msgp->data), strlen(ctl_dump)) == 0) {
       dump_tables();
     } else {
-      TSContSchedule(contp, 0, TS_THREAD_POOL_NET);
+      TSContScheduleOnPool(contp, 0, TS_THREAD_POOL_NET);
     }
 
     break;