Posted to commits@trafficserver.apache.org by ma...@apache.org on 2022/08/03 23:08:13 UTC

[trafficserver] branch master updated: Add stack guard pages (#8996)

This is an automated email from the ASF dual-hosted git repository.

masaori pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
     new 1abf6c011 Add stack guard pages (#8996)
1abf6c011 is described below

commit 1abf6c01164e3893f25cb14460d7da0a5007112d
Author: Mo Chen <un...@gmail.com>
AuthorDate: Wed Aug 3 18:08:07 2022 -0500

    Add stack guard pages (#8996)
    
    Use read/write-protected pages at the top of each stack to guard
    against stack overflow.  The number of VM pages used for guarding can
    be set via the config option proxy.config.thread.default.stackguard_pages.
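    
    For example, a records.config line reserving two guard pages per stack
    (the value 2 is illustrative; the record accepts [1-256]):
    
        CONFIG proxy.config.thread.default.stackguard_pages INT 2
    
    The record is tagged RECU_RESTART_TS, so a change takes effect only
    after a Traffic Server restart.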
---
 iocore/eventsystem/UnixEventProcessor.cc | 59 ++++++++++++++++++++++++++++----
 mgmt/RecordsConfig.cc                    |  2 ++
 2 files changed, 54 insertions(+), 7 deletions(-)

diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index ec860943d..2217d6115 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -47,7 +47,7 @@ public:
   void init();
   /// Set the affinity for the current thread.
   int set_affinity(int, Event *);
-  /// Allocate a stack.
+  /// Allocate a stack and set guard pages.
   /// @internal This is the external entry point and is different depending on
   /// whether HWLOC is enabled.
   void *alloc_stack(EThread *t, size_t stacksize);
@@ -55,7 +55,8 @@ public:
 protected:
   /// Allocate a hugepage stack.
 /// If huge pages are not enabled, allocate a basic stack.
-  void *alloc_hugepage_stack(size_t stacksize);
+  void *do_alloc_stack(size_t stacksize);
+  void setup_stack_guard(void *stack, int stackguard_pages);
 
 #if TS_USE_HWLOC
 
@@ -136,10 +137,54 @@ public:
 } Thread_Init_Func;
 } // namespace
 
+void
+ThreadAffinityInitializer::setup_stack_guard(void *stack, int stackguard_pages)
+{
+#if !(defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__mips__))
+#error Unknown stack growth direction.  Determine the stack growth direction of your platform.
+// If your stack grows upwards, you need to change this function and the calculation of stack_begin in do_alloc_stack.
+#endif
+  // Assumption: stack grows down
+  if (stackguard_pages <= 0) {
+    return;
+  }
+
+  size_t pagesize  = ats_hugepage_enabled() ? ats_hugepage_size() : ats_pagesize();
+  size_t guardsize = stackguard_pages * pagesize;
+  int ret          = mprotect(stack, guardsize, 0);
+  if (ret != 0) {
+    Fatal("Failed to set up stack guard pages: %s (%d)", strerror(errno), errno);
+  }
+}
+
 void *
-ThreadAffinityInitializer::alloc_hugepage_stack(size_t stacksize)
+ThreadAffinityInitializer::do_alloc_stack(size_t stacksize)
 {
-  return ats_hugepage_enabled() ? ats_alloc_hugepage(stacksize) : ats_memalign(ats_pagesize(), stacksize);
+  size_t pagesize = ats_hugepage_enabled() ? ats_hugepage_size() : ats_pagesize();
+  int stackguard_pages;
+  REC_ReadConfigInteger(stackguard_pages, "proxy.config.thread.default.stackguard_pages");
+  ink_release_assert(stackguard_pages >= 0);
+
+  size_t size    = INK_ALIGN(stacksize + stackguard_pages * pagesize, pagesize);
+  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#ifdef MAP_HUGETLB
+  if (ats_hugepage_enabled()) {
+    mmap_flags |= MAP_HUGETLB;
+  }
+#endif
+  void *stack_and_guard = mmap(nullptr, size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+  if (stack_and_guard == MAP_FAILED) {
+    Error("Failed to allocate stack pages: size = %zu", size);
+    return nullptr;
+  }
+
+  setup_stack_guard(stack_and_guard, stackguard_pages);
+
+  void *stack_begin = static_cast<char *>(stack_and_guard) + stackguard_pages * pagesize;
+  Debug("iocore_thread", "Allocated %zu bytes (%zu bytes in guard pages) for stack {%p-%p guard, %p-%p stack}", size,
+        stackguard_pages * pagesize, stack_and_guard, stack_begin, stack_begin, static_cast<char *>(stack_begin) + stacksize);
+
+  return stack_begin;
 }
 
 #if TS_USE_HWLOC
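
The guard works because mprotect() with a protection argument of 0 (PROT_NONE)
makes the pages inaccessible: a read or write that strays into them raises
SIGSEGV immediately instead of silently corrupting adjacent memory. A minimal
standalone demonstration of that behavior (a hypothetical test program, not
part of this commit):

    #include <csignal>
    #include <sys/mman.h>
    #include <unistd.h>

    // A write into the PROT_NONE page faults; the handler reports it and
    // exits. write() and _exit() are async-signal-safe.
    static void on_segv(int)
    {
      const char msg[] = "guard page hit: SIGSEGV as expected\n";
      (void)write(STDOUT_FILENO, msg, sizeof(msg) - 1);
      _exit(0);
    }

    int main()
    {
      struct sigaction sa = {};
      sa.sa_handler = on_segv;
      sigaction(SIGSEGV, &sa, nullptr);

      long pagesize = sysconf(_SC_PAGESIZE);
      char *region  = static_cast<char *>(
        mmap(nullptr, 2 * pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (region == MAP_FAILED) {
        return 1;
      }
      mprotect(region, pagesize, PROT_NONE); // the low page becomes the guard

      region[pagesize] = 1; // fine: first byte above the guard
      region[0]        = 1; // faults: lands in the guard page
      return 1;             // not reached
    }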
@@ -239,7 +284,7 @@ ThreadAffinityInitializer::alloc_numa_stack(EThread *t, size_t stacksize)
   }
 
   // Alloc our stack
-  stack = this->alloc_hugepage_stack(stacksize);
+  stack = this->do_alloc_stack(stacksize);
 
   if (mem_policy != HWLOC_MEMBIND_DEFAULT) {
     // Now let's set it back to default for this thread.
@@ -260,7 +305,7 @@ ThreadAffinityInitializer::alloc_numa_stack(EThread *t, size_t stacksize)
 void *
 ThreadAffinityInitializer::alloc_stack(EThread *t, size_t stacksize)
 {
-  return this->obj_count > 0 ? this->alloc_numa_stack(t, stacksize) : this->alloc_hugepage_stack(stacksize);
+  return this->obj_count > 0 ? this->alloc_numa_stack(t, stacksize) : this->do_alloc_stack(stacksize);
 }
 
 #else
@@ -279,7 +324,7 @@ ThreadAffinityInitializer::set_affinity(int, Event *)
 void *
 ThreadAffinityInitializer::alloc_stack(EThread *, size_t stacksize)
 {
-  return this->alloc_hugepage_stack(stacksize);
+  return this->do_alloc_stack(stacksize);
 }
 
 #endif // TS_USE_HWLOC
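
Callers receive the first usable byte above the guard, so the returned pointer
can be handed straight to thread creation. A hypothetical caller-side sketch
(the actual EThread spawn path is outside this diff; the function name is
illustrative), using pthread_attr_setstack(), which expects the lowest usable
stack address:

    #include <pthread.h>
    #include <cstddef>

    static void *thread_main(void *) { return nullptr; }

    // stack_begin is what alloc_stack() returned; the guard pages sit just
    // below it, so an overflow past the low end of the stack faults at once.
    void spawn_on_guarded_stack(void *stack_begin, size_t stacksize)
    {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setstack(&attr, stack_begin, stacksize);

      pthread_t tid;
      pthread_create(&tid, &attr, thread_main, nullptr);
      pthread_attr_destroy(&attr);
    }

Because the guard lies below the region handed to pthreads, the full
configured stacksize remains usable.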
diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc
index d2d1fd2ef..05c71811c 100644
--- a/mgmt/RecordsConfig.cc
+++ b/mgmt/RecordsConfig.cc
@@ -125,6 +125,8 @@ static const RecordElement RecordsConfig[] =
   ,
   {RECT_CONFIG, "proxy.config.thread.default.stacksize", RECD_INT, "1048576", RECU_RESTART_TS, RR_NULL, RECC_INT, "[131072-104857600]", RECA_READ_ONLY}
   ,
+  {RECT_CONFIG, "proxy.config.thread.default.stackguard_pages", RECD_INT, "1", RECU_RESTART_TS, RR_NULL, RECC_INT, "[1-256]", RECA_READ_ONLY}
+  ,
   {RECT_CONFIG, "proxy.config.restart.active_client_threshold", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
   ,
   {RECT_CONFIG, "proxy.config.restart.stop_listening", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL}