Posted to commits@nuttx.apache.org by ag...@apache.org on 2020/05/05 17:56:41 UTC

[incubator-nuttx] 02/04: TLS_UNALIGNED (#2)

This is an automated email from the ASF dual-hosted git repository.

aguettouche pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit b7e7fba732a1cf51edf66ac5d48b99e142ad6d6a
Author: Abdelatif Guettouche <ab...@gmail.com>
AuthorDate: Tue May 5 16:18:25 2020 +0100

    TLS_UNALIGNED (#2)
    
    * Implement the unaligned TLS case (CONFIG_TLS_ALIGNED disabled)
---
 arch/arm/include/tls.h                | 16 ++++++++++++++++
 arch/arm/src/common/arm_checkstack.c  |  2 +-
 arch/arm/src/common/arm_createstack.c |  8 +++++---
 arch/arm/src/common/arm_usestack.c    |  2 +-
 arch/sim/include/tls.h                | 16 ++++++++++++++++
 arch/sim/src/sim/up_checkstack.c      |  2 +-
 arch/sim/src/sim/up_createstack.c     |  4 +++-
 arch/sim/src/sim/up_usestack.c        |  2 +-
 include/nuttx/tls.h                   | 17 +++++++++++------
 libs/libc/tls/Kconfig                 |  8 ++++----
 10 files changed, 59 insertions(+), 18 deletions(-)

diff --git a/arch/arm/include/tls.h b/arch/arm/include/tls.h
index 97e0d9c..d979f9a 100644
--- a/arch/arm/include/tls.h
+++ b/arch/arm/include/tls.h
@@ -99,8 +99,24 @@ static inline uint32_t up_getsp(void)
 
 static inline FAR struct tls_info_s *up_tls_info(void)
 {
+#ifdef CONFIG_TLS_ALIGNED
   DEBUGASSERT(!up_interrupt_context());
   return TLS_INFO((uintptr_t)up_getsp());
+#else
+  FAR struct tls_info_s *info = NULL;
+  struct stackinfo_s stackinfo;
+  int ret;
+  
+  DEBUGASSERT(!up_interrupt_context());
+
+  ret = sched_get_stackinfo(0, &stackinfo);
+  if (ret == OK)
+    {
+      info = (FAR struct tls_info_s *)stackinfo.adj_stack_ptr;
+    }
+
+  return info;
+#endif
 }
 
 #endif /* CONFIG_TLS */
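
A minimal sketch (not part of the patch) of how the aligned branch above
resolves the TLS structure.  CONFIG_TLS_LOG2_MAXSTACK = 13 and the stack
pointer value are hypothetical illustration values; TLS_INFO(),
TLS_STACK_MASK and up_getsp() are the definitions used in this patch:

  /* With CONFIG_TLS_LOG2_MAXSTACK = 13, stacks are aligned to 8 KiB:
   *
   *   TLS_STACK_ALIGN = 1 << 13 = 0x2000
   *   TLS_STACK_MASK  = 0x2000 - 1 = 0x1fff
   *
   * For a current stack pointer of 0x20007c40:
   *
   *   TLS_INFO(0x20007c40) = 0x20007c40 & ~0x1fff = 0x20006000
   *
   * 0x20006000 is the base of the aligned stack allocation, where
   * up_create_stack() implicitly placed the struct tls_info_s instance.
   */

  FAR struct tls_info_s *info = TLS_INFO((uintptr_t)up_getsp());
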
diff --git a/arch/arm/src/common/arm_checkstack.c b/arch/arm/src/common/arm_checkstack.c
index af57a57..5ac36fd 100644
--- a/arch/arm/src/common/arm_checkstack.c
+++ b/arch/arm/src/common/arm_checkstack.c
@@ -92,7 +92,7 @@ static size_t do_stackcheck(uintptr_t alloc, size_t size, bool int_stack)
 
   /* Get aligned addresses of the top and bottom of the stack */
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
   if (!int_stack)
     {
       /* Skip over the TLS data structure at the bottom of the stack */
diff --git a/arch/arm/src/common/arm_createstack.c b/arch/arm/src/common/arm_createstack.c
index 4600d40..62195dd 100644
--- a/arch/arm/src/common/arm_createstack.c
+++ b/arch/arm/src/common/arm_createstack.c
@@ -107,6 +107,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
 
   stack_size += sizeof(struct tls_info_s);
 
+#ifdef CONFIG_TLS_ALIGNED
   /* The allocated stack size must not exceed the maximum possible for the
    * TLS feature.
    */
@@ -117,6 +118,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
       stack_size = TLS_MAXSTACK;
     }
 #endif
+#endif
 
   /* Is there already a stack allocated of a different size?  Because of
    * alignment issues, stack_size might erroneously appear to be of a
@@ -139,7 +141,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
        * If TLS is enabled, then we must allocate aligned stacks.
        */
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
 #ifdef CONFIG_MM_KERNEL_HEAP
       /* Use the kernel allocator if this is a kernel thread */
 
@@ -157,7 +159,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
             (uint32_t *)kumm_memalign(TLS_STACK_ALIGN, stack_size);
         }
 
-#else /* CONFIG_TLS */
+#else /* CONFIG_TLS_ALIGNED */
 #ifdef CONFIG_MM_KERNEL_HEAP
       /* Use the kernel allocator if this is a kernel thread */
 
@@ -172,7 +174,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
 
           tcb->stack_alloc_ptr = (uint32_t *)kumm_malloc(stack_size);
         }
-#endif /* CONFIG_TLS */
+#endif /* CONFIG_TLS_ALIGNED */
 
 #ifdef CONFIG_DEBUG_FEATURES
       /* Was the allocation successful? */
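
To see why the clamp to TLS_MAXSTACK above is only needed in the aligned
configuration, consider a hypothetical CONFIG_TLS_LOG2_MAXSTACK of 13 (the
numbers below are illustrative only; the macros are the ones defined in
this patch):

  /* TLS_STACK_ALIGN = TLS_MAXSTACK = 0x2000 (8 KiB).
   *
   * Suppose a 10 KiB (0x2800) stack were allowed anyway, allocated at the
   * aligned base 0x20006000.  A stack pointer 9 KiB into that stack is:
   *
   *   sp           = 0x20006000 + 0x2400 = 0x20008400
   *   TLS_INFO(sp) = 0x20008400 & ~0x1fff = 0x20008000
   *
   * which is 8 KiB past the real base, so the mask-based lookup would
   * read the wrong memory.  Clamping stack_size to TLS_MAXSTACK avoids
   * this.  The unaligned configuration locates the structure through
   * sched_get_stackinfo() instead, so it needs no such limit.
   */
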
diff --git a/arch/arm/src/common/arm_usestack.c b/arch/arm/src/common/arm_usestack.c
index 0d44322..b5b2c67 100644
--- a/arch/arm/src/common/arm_usestack.c
+++ b/arch/arm/src/common/arm_usestack.c
@@ -90,7 +90,7 @@ int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
   size_t top_of_stack;
   size_t size_of_stack;
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
   /* Make certain that the user provided stack is properly aligned */
 
   DEBUGASSERT(((uintptr_t)stack & TLS_STACK_MASK) == 0);
diff --git a/arch/sim/include/tls.h b/arch/sim/include/tls.h
index 1f768b1..edbadc4 100644
--- a/arch/sim/include/tls.h
+++ b/arch/sim/include/tls.h
@@ -81,7 +81,23 @@
 
 static inline FAR struct tls_info_s *up_tls_info(void)
 {
+#ifdef CONFIG_TLS_ALIGNED
   return TLS_INFO((uintptr_t)__builtin_frame_address(0));
+#else
+  FAR struct tls_info_s *info = NULL;
+  struct stackinfo_s stackinfo;
+  int ret;
+  
+  DEBUGASSERT(!up_interrupt_context());
+
+  ret = sched_get_stackinfo(0, &stackinfo);
+  if (ret == OK)
+    {
+      info = (FAR struct tls_info_s *)stackinfo.adj_stack_ptr;
+    }
+
+  return info;
+#endif
 }
 
 #endif /* CONFIG_TLS */
diff --git a/arch/sim/src/sim/up_checkstack.c b/arch/sim/src/sim/up_checkstack.c
index f8be93f..955ea6b 100644
--- a/arch/sim/src/sim/up_checkstack.c
+++ b/arch/sim/src/sim/up_checkstack.c
@@ -89,7 +89,7 @@ static size_t do_stackcheck(uintptr_t alloc, size_t size, bool int_stack)
 
   /* Get aligned addresses of the top and bottom of the stack */
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
   if (!int_stack)
     {
       /* Skip over the TLS data structure at the bottom of the stack */
diff --git a/arch/sim/src/sim/up_createstack.c b/arch/sim/src/sim/up_createstack.c
index 1c04701..8a85f93 100644
--- a/arch/sim/src/sim/up_createstack.c
+++ b/arch/sim/src/sim/up_createstack.c
@@ -111,6 +111,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
 
   stack_size += sizeof(struct tls_info_s);
 
+#ifdef CONFIG_TLS_ALIGNED
   /* The allocated stack size must not exceed the maximum possible for the
    * TLS feature.
    */
@@ -121,6 +122,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
       stack_size = TLS_MAXSTACK;
     }
 #endif
+#endif
 
   /* Move up to next even word boundary if necessary */
 
@@ -128,7 +130,7 @@ int up_create_stack(FAR struct tcb_s *tcb, size_t stack_size, uint8_t ttype)
 
   /* Allocate the memory for the stack */
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
   stack_alloc_ptr = (FAR uint8_t *)kumm_memalign(TLS_STACK_ALIGN,
                                                  adj_stack_size);
 #else /* CONFIG_TLS */
diff --git a/arch/sim/src/sim/up_usestack.c b/arch/sim/src/sim/up_usestack.c
index 8703f23..9325568 100644
--- a/arch/sim/src/sim/up_usestack.c
+++ b/arch/sim/src/sim/up_usestack.c
@@ -101,7 +101,7 @@ int up_use_stack(FAR struct tcb_s *tcb, FAR void *stack, size_t stack_size)
   uintptr_t adj_stack_addr;
   size_t adj_stack_size;
 
-#ifdef CONFIG_TLS
+#ifdef CONFIG_TLS_ALIGNED
   /* Make certain that the user provided stack is properly aligned */
 
   DEBUGASSERT(((uintptr_t)stack & TLS_STACK_MASK) == 0);
diff --git a/include/nuttx/tls.h b/include/nuttx/tls.h
index 39c9529..f188f02 100644
--- a/include/nuttx/tls.h
+++ b/include/nuttx/tls.h
@@ -51,8 +51,10 @@
  ****************************************************************************/
 /* Configuration ************************************************************/
 
-#ifndef CONFIG_TLS_LOG2_MAXSTACK
-#  error CONFIG_TLS_LOG2_MAXSTACK is not defined
+#ifdef CONFIG_TLS_ALIGNED
+#  ifndef CONFIG_TLS_LOG2_MAXSTACK
+#    error CONFIG_TLS_LOG2_MAXSTACK is not defined
+#  endif
 #endif
 
 #ifndef CONFIG_TLS_NELEM
@@ -68,14 +70,17 @@
 
 /* TLS Definitions **********************************************************/
 
-#define TLS_STACK_ALIGN   (1L << CONFIG_TLS_LOG2_MAXSTACK)
-#define TLS_STACK_MASK    (TLS_STACK_ALIGN - 1)
-#define TLS_MAXSTACK      (TLS_STACK_ALIGN)
-#define TLS_INFO(sp)      ((FAR struct tls_info_s *)((sp) & ~TLS_STACK_MASK))
+#ifdef CONFIG_TLS_ALIGNED
+#  define TLS_STACK_ALIGN  (1L << CONFIG_TLS_LOG2_MAXSTACK)
+#  define TLS_STACK_MASK   (TLS_STACK_ALIGN - 1)
+#  define TLS_MAXSTACK     (TLS_STACK_ALIGN)
+#  define TLS_INFO(sp)     ((FAR struct tls_info_s *)((sp) & ~TLS_STACK_MASK))
+#endif
 
 /****************************************************************************
  * Public Types
  ****************************************************************************/
+
 /* When TLS is enabled, up_createstack() will align allocated stacks to the
  * TLS_STACK_ALIGN value.  An instance of the following structure will be
  * implicitly positioned at the "lower" end of the stack.  Assuming a
diff --git a/libs/libc/tls/Kconfig b/libs/libc/tls/Kconfig
index 55d0050..76b53ad 100644
--- a/libs/libc/tls/Kconfig
+++ b/libs/libc/tls/Kconfig
@@ -26,23 +26,23 @@ config TLS_ALIGNED
 	default y if BUILD_KERNEL
 	default n if !BUILD_KERNEL
 	---help---
-		Aligned TLS works by fetch thread information from the beginning
+		Aligned TLS works by fetching thread information from the beginning
 		of the stack memory allocation.  In order to do this, the memory
 		must be aligned in such a way that the executing logic can simply
 		mask the current stack pointer to get the beginning of the stack
 		allocation.
 
 		The advantage of using an aligned stack is no OS interface need
-		be called to get the beginning of the stack.  It is simply and
+		be called to get the beginning of the stack.  It is simply an
 		AND operation on the current stack pointer.  The disadvantages
-		are that the alignment (1) causes memory fragmentation which
+		are that the alignment (1) causes memory fragmentation which can
 		be a serious problem for memory limited systems, and (2) limits
 		the maximum size of the stack.  Any mask places a limit on the
 		maximum size of the stack; stack sizes above that would map to
 		an incorrect address.
 
 		In general, CONFIG_TLS_ALIGNED is preferred for the KERNEL
-		build where the virtualized stack address an be aligned with
+		build where the virtualized stack address can be aligned with
 		no implications to physical memory.  In other builds, the
 		unaligned stack implementation is usually superior.
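
To make that tradeoff concrete, a minimal sketch of what each setting costs
per lookup.  Both forms are taken from the up_tls_info() implementations
earlier in this patch; no interfaces beyond those are assumed:

  FAR struct tls_info_s *info = NULL;
  struct stackinfo_s stackinfo;

  /* CONFIG_TLS_ALIGNED=y: a single AND of the current stack pointer,
   * no OS interface involved.
   */

  info = TLS_INFO((uintptr_t)up_getsp());

  /* CONFIG_TLS_ALIGNED=n: a scheduler query for the running thread's
   * stack information, which may also fail and leave info NULL.
   */

  if (sched_get_stackinfo(0, &stackinfo) == OK)
    {
      info = (FAR struct tls_info_s *)stackinfo.adj_stack_ptr;
    }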