You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@trafficserver.apache.org by zw...@apache.org on 2022/08/10 17:09:17 UTC
[trafficserver] branch 9.2.x updated: Add stack guard pages (#8996)
This is an automated email from the ASF dual-hosted git repository.
zwoop pushed a commit to branch 9.2.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/9.2.x by this push:
new 347adb11a Add stack guard pages (#8996)
347adb11a is described below
commit 347adb11a3e2d8f3f87fa108d5e34e40eae5bbc8
Author: Mo Chen <un...@gmail.com>
AuthorDate: Wed Aug 3 18:08:07 2022 -0500
Add stack guard pages (#8996)
Use r/w protected pages on top of stacks to guard against stack
overflow. The number of VM pages to use for guarding can be set
via the config option proxy.config.thread.default.stackguard_pages.
(cherry picked from commit 1abf6c01164e3893f25cb14460d7da0a5007112d)
---
iocore/eventsystem/UnixEventProcessor.cc | 59 ++++++++++++++++++++++++++++----
mgmt/RecordsConfig.cc | 2 ++
2 files changed, 54 insertions(+), 7 deletions(-)
diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index ec860943d..2217d6115 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -47,7 +47,7 @@ public:
void init();
/// Set the affinity for the current thread.
int set_affinity(int, Event *);
- /// Allocate a stack.
+ /// Allocate a stack and set guard pages.
/// @internal This is the external entry point and is different depending on
/// whether HWLOC is enabled.
void *alloc_stack(EThread *t, size_t stacksize);
@@ -55,7 +55,8 @@ public:
protected:
/// Allocate a hugepage stack.
/// If huge pages are not enabled, allocate a basic stack.
- void *alloc_hugepage_stack(size_t stacksize);
+ void *do_alloc_stack(size_t stacksize);
+ void setup_stack_guard(void *stack, int stackguard_pages);
#if TS_USE_HWLOC
@@ -136,10 +137,54 @@ public:
} Thread_Init_Func;
} // namespace
+void
+ThreadAffinityInitializer::setup_stack_guard(void *stack, int stackguard_pages)
+{
+#if !(defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__mips__))
+#error Unknown stack growth direction. Determine the stack growth direction of your platform.
+// If your stack grows upwards, you need to change this function and the calculation of stack_begin in do_alloc_stack.
+#endif
+ // Assumption: stack grows down
+ if (stackguard_pages <= 0) {
+ return;
+ }
+
+ size_t pagesize = ats_hugepage_enabled() ? ats_hugepage_size() : ats_pagesize();
+ size_t guardsize = stackguard_pages * pagesize;
+ int ret = mprotect(stack, guardsize, 0);
+ if (ret != 0) {
+ Fatal("Failed to set up stack guard pages: %s (%d)", strerror(errno), errno);
+ }
+}
+
void *
-ThreadAffinityInitializer::alloc_hugepage_stack(size_t stacksize)
+ThreadAffinityInitializer::do_alloc_stack(size_t stacksize)
{
- return ats_hugepage_enabled() ? ats_alloc_hugepage(stacksize) : ats_memalign(ats_pagesize(), stacksize);
+ size_t pagesize = ats_hugepage_enabled() ? ats_hugepage_size() : ats_pagesize();
+ int stackguard_pages;
+ REC_ReadConfigInteger(stackguard_pages, "proxy.config.thread.default.stackguard_pages");
+ ink_release_assert(stackguard_pages >= 0);
+
+ size_t size = INK_ALIGN(stacksize + stackguard_pages * pagesize, pagesize);
+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#ifdef MAP_HUGETLB
+ if (ats_hugepage_enabled()) {
+ mmap_flags |= MAP_HUGETLB;
+ }
+#endif
+ void *stack_and_guard = mmap(nullptr, size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+ if (stack_and_guard == MAP_FAILED) {
+ Error("Failed to allocate stack pages: size = %zu", size);
+ return nullptr;
+ }
+
+ setup_stack_guard(stack_and_guard, stackguard_pages);
+
+ void *stack_begin = static_cast<char *>(stack_and_guard) + stackguard_pages * pagesize;
+ Debug("iocore_thread", "Allocated %zu bytes (%zu bytes in guard pages) for stack {%p-%p guard, %p-%p stack}", size,
+ stackguard_pages * pagesize, stack_and_guard, stack_begin, stack_begin, static_cast<char *>(stack_begin) + stacksize);
+
+ return stack_begin;
}
#if TS_USE_HWLOC
@@ -239,7 +284,7 @@ ThreadAffinityInitializer::alloc_numa_stack(EThread *t, size_t stacksize)
}
// Alloc our stack
- stack = this->alloc_hugepage_stack(stacksize);
+ stack = this->do_alloc_stack(stacksize);
if (mem_policy != HWLOC_MEMBIND_DEFAULT) {
// Now let's set it back to default for this thread.
@@ -260,7 +305,7 @@ ThreadAffinityInitializer::alloc_numa_stack(EThread *t, size_t stacksize)
void *
ThreadAffinityInitializer::alloc_stack(EThread *t, size_t stacksize)
{
- return this->obj_count > 0 ? this->alloc_numa_stack(t, stacksize) : this->alloc_hugepage_stack(stacksize);
+ return this->obj_count > 0 ? this->alloc_numa_stack(t, stacksize) : this->do_alloc_stack(stacksize);
}
#else
@@ -279,7 +324,7 @@ ThreadAffinityInitializer::set_affinity(int, Event *)
void *
ThreadAffinityInitializer::alloc_stack(EThread *, size_t stacksize)
{
- return this->alloc_hugepage_stack(stacksize);
+ return this->do_alloc_stack(stacksize);
}
#endif // TS_USE_HWLOC
diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc
index d40e1a609..d2658db13 100644
--- a/mgmt/RecordsConfig.cc
+++ b/mgmt/RecordsConfig.cc
@@ -125,6 +125,8 @@ static const RecordElement RecordsConfig[] =
,
{RECT_CONFIG, "proxy.config.thread.default.stacksize", RECD_INT, "1048576", RECU_RESTART_TS, RR_NULL, RECC_INT, "[131072-104857600]", RECA_READ_ONLY}
,
+ {RECT_CONFIG, "proxy.config.thread.default.stackguard_pages", RECD_INT, "1", RECU_RESTART_TS, RR_NULL, RECC_INT, "[1-256]", RECA_READ_ONLY}
+ ,
{RECT_CONFIG, "proxy.config.restart.active_client_threshold", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_CONFIG, "proxy.config.restart.stop_listening", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}