Posted to commits@nuttx.apache.org by xi...@apache.org on 2023/01/16 12:32:24 UTC

[nuttx] branch master updated (415a09115d -> cb404167a7)

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


    from 415a09115d boards/sim/windows: enable custom options
     new 7cd325f3be mm/mm_heap: remove kasan in MM_ADD_BACKTRACE
     new c82f44c4f3 mm/mm_heap: add mempool to optimize small block performance
     new bdcb1f6a25 fix: mmsize_t needs to support 64-bit
     new cb404167a7 mm/tlsf: add mempool to optimize small block performance

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 arch/sim/src/sim/sim_backtrace.c |  7 ++-
 mm/Kconfig                       | 16 +++++++
 mm/mm_heap/mm.h                  | 22 +++++++--
 mm/mm_heap/mm_free.c             |  8 ++++
 mm/mm_heap/mm_initialize.c       | 14 ++++++
 mm/mm_heap/mm_malloc.c           |  8 ++++
 mm/mm_heap/mm_malloc_size.c      |  7 +++
 mm/mm_heap/mm_memalign.c         | 10 +++-
 mm/mm_heap/mm_realloc.c          | 36 +++++++++++++++
 mm/tlsf/mm_tlsf.c                | 98 +++++++++++++++++++++++++++++++++++++++-
 10 files changed, 218 insertions(+), 8 deletions(-)


[nuttx] 02/04: mm/mm_heap: add mempool to optimize small block performance

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit c82f44c4f3fca2fc08678edce223b434f819ead1
Author: dongjiuzhu1 <do...@xiaomi.com>
AuthorDate: Sun Oct 30 11:34:24 2022 +0800

    mm/mm_heap: add mempool to optimize small block performance
    
    There are many small memory blocks in a NuttX system (e.g. struct
    tcb_s, struct inode), and they have several disadvantages:
    1. Their frequent allocation and freeing fragment the system memory.
    2. Since each memory block carries an overhead, the utilization of
    small memory blocks is relatively low, which wastes memory.

    So we can use a mempool to allocate small blocks, improving
    allocation speed and utilization while reducing fragmentation.
    
    Signed-off-by: dongjiuzhu1 <do...@xiaomi.com>
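
Pool selection here is O(1): since pool i holds blocks of
(i + 1) * sizeof(uintptr_t) bytes (see the mm_initialize() hunk below),
a request size maps to its pool by arithmetic alone, with no search and
no heap lock. A minimal sketch of that routing idea, using hypothetical
names (pool_for, THRESHOLD) in place of the real mempool_multiple
machinery:

  #include <stddef.h>
  #include <stdint.h>

  struct pool { size_t blocksize; };  /* stand-in for struct mempool_s */

  #define THRESHOLD 64  /* stands in for CONFIG_MM_HEAP_MEMPOOL_THRESHOLD */

  static struct pool pools[THRESHOLD / sizeof(uintptr_t)];

  /* Map a request to the first pool whose block size is large enough */

  static struct pool *pool_for(size_t size)
  {
    if (size == 0 || size > THRESHOLD)
      {
        return NULL;  /* Too big: fall back to the regular heap */
      }

    return &pools[(size - 1) / sizeof(uintptr_t)];
  }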
---
 mm/Kconfig                  | 16 ++++++++++++++++
 mm/mm_heap/mm.h             | 14 ++++++++++++++
 mm/mm_heap/mm_free.c        |  8 ++++++++
 mm/mm_heap/mm_initialize.c  | 14 ++++++++++++++
 mm/mm_heap/mm_malloc.c      |  8 ++++++++
 mm/mm_heap/mm_malloc_size.c |  7 +++++++
 mm/mm_heap/mm_memalign.c    | 10 +++++++++-
 mm/mm_heap/mm_realloc.c     | 36 ++++++++++++++++++++++++++++++++++++
 8 files changed, 112 insertions(+), 1 deletion(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index d9da188d56..1ef8a1305f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -172,6 +172,22 @@ config MM_SHM
 		Build in support for the shared memory interfaces shmget(), shmat(),
 		shmctl(), and shmdt().
 
+config MM_HEAP_MEMPOOL_THRESHOLD
+	int "The threshold below which the heap allocates from the multiple mempool"
+	default 0
+	---help---
+		If the size of the memory requested by the user is less
+		than the threshold, the memory will be requested from the
+		multiple mempool by default.
+
+config MM_HEAP_MEMPOOL_EXPAND
+	int "The expand size for each mempool in multiple mempool"
+	default 1024
+	depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
+	---help---
+		This is the number of bytes by which each memory pool in the
+		multiple mempool grows when it runs out of free blocks.
+
 config FS_PROCFS_EXCLUDE_MEMPOOL
 	bool "Exclude mempool"
 	default DEFAULT_SMALL
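
As a concrete example (assuming a 32-bit target where sizeof(uintptr_t)
is 4), CONFIG_MM_HEAP_MEMPOOL_THRESHOLD=64 creates 16 pools with block
sizes 4, 8, 12, ..., 64 bytes; any allocation of 64 bytes or less is
served from one of them, and anything larger goes to the regular heap.
With the default CONFIG_MM_HEAP_MEMPOOL_EXPAND=1024, a pool that runs
empty grows by another 1024 bytes, i.e. 256 more 4-byte blocks or 16
more 64-byte blocks.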
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 23f5795bc3..4bb919ac1e 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -31,6 +31,7 @@
 #include <nuttx/sched.h>
 #include <nuttx/fs/procfs.h>
 #include <nuttx/lib/math32.h>
+#include <nuttx/mm/mempool.h>
 
 #include <assert.h>
 #include <execinfo.h>
@@ -133,6 +134,11 @@
 
 #define SIZEOF_MM_FREENODE sizeof(struct mm_freenode_s)
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+#  define MM_IS_FROM_MEMPOOL(mem) \
+  ((*((FAR mmsize_t *)mem - 1) & MM_ALLOC_BIT) == 0)
+#endif
+
 /****************************************************************************
  * Public Types
  ****************************************************************************/
@@ -225,6 +231,14 @@ struct mm_heap_s
 
   FAR struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
 
+  /* This is the multiple mempool of the heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  struct mempool_multiple_s mm_mpool;
+  struct mempool_s mm_pools[CONFIG_MM_HEAP_MEMPOOL_THRESHOLD /
+                            sizeof(uintptr_t)];
+#endif
+
 #if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
   struct procfs_meminfo_entry_s mm_procfs;
 #endif
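
The MM_IS_FROM_MEMPOOL() test works because every chunk handed out by
the regular heap is preceded by an mmsize_t size word with MM_ALLOC_BIT
set, while blocks handed out by the mempool leave that bit clear, so a
single load decides which allocator owns a pointer. A sketch of the
test (the MM_ALLOC_BIT value is illustrative, not the real definition):

  #include <stdbool.h>
  #include <stdint.h>

  #define FAR                 /* NuttX qualifier; empty on flat targets */

  typedef uintptr_t mmsize_t; /* simplified stand-in */
  #define MM_ALLOC_BIT 0x1    /* hypothetical flag value */

  static bool is_from_mempool(FAR void *mem)
  {
    /* Read the size/tag word stored just before the user pointer */

    return (*((FAR mmsize_t *)mem - 1) & MM_ALLOC_BIT) == 0;
  }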
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index 66f3707bda..c8510d9a04 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -82,6 +82,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
       return;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      mempool_multiple_free(&heap->mm_mpool, mem);
+      return;
+    }
+#endif
+
   if (mm_lock(heap) < 0)
     {
       /* Meet -ESRCH return, which means we are in situations
diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index c10f7cde7b..c0fe89d27b 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -226,6 +226,20 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
 #  endif
 #endif
 
+  /* Initialize the multiple mempool in heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  heap->mm_mpool.pools = heap->mm_pools;
+  heap->mm_mpool.npools = sizeof(heap->mm_pools) / sizeof(heap->mm_pools[0]);
+  for (i = 0; i < heap->mm_mpool.npools; i++)
+    {
+      heap->mm_pools[i].blocksize = (i + 1) * sizeof(uintptr_t);
+      heap->mm_pools[i].expandsize = CONFIG_MM_HEAP_MEMPOOL_EXPAND;
+    }
+
+  mempool_multiple_init(&heap->mm_mpool, name);
+#endif
+
   /* Add the initial region of memory to the heap */
 
   mm_addregion(heap, heapstart, heapsize);
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index 303fef3e35..389321273a 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -120,6 +120,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_alloc(&heap->mm_mpool, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Adjust the size to account for (1) the size of the allocated node and
    * (2) to make sure that it is an even multiple of our granule size.
    */
diff --git a/mm/mm_heap/mm_malloc_size.c b/mm/mm_heap/mm_malloc_size.c
index 98ffde645a..bb766305db 100644
--- a/mm/mm_heap/mm_malloc_size.c
+++ b/mm/mm_heap/mm_malloc_size.c
@@ -46,6 +46,13 @@ size_t mm_malloc_size(FAR void *mem)
       return 0;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      return mempool_multiple_alloc_size(mem);
+    }
+#endif
+
   /* Map the memory chunk into a free node */
 
   node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index 45b61ee6b6..d8e30942b7 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -43,7 +43,7 @@
  *   within that chunk that meets the alignment request and then frees any
  *   leading or trailing space.
  *
- *   The alignment argument must be a power of two.  8-byte alignment is
+ *   The alignment argument must be a power of two. 16-byte alignment is
  *   guaranteed by normal malloc calls.
  *
  ****************************************************************************/
@@ -72,6 +72,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  node = mempool_multiple_memalign(&heap->mm_mpool, alignment, size);
+  if (node != NULL)
+    {
+      return node;
+    }
+#endif
+
   /* If the requested alignment is less than or equal to the natural
    * alignment of malloc, then just let malloc do the work.
    */
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index c928a46416..efb4b8c3d2 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -34,6 +34,12 @@
 #include "mm_heap/mm.h"
 #include "kasan/kasan.h"
 
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -88,6 +94,36 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(oldmem))
+    {
+      newmem = mempool_multiple_realloc(&heap->mm_mpool, oldmem, size);
+      if (newmem != NULL)
+        {
+          return newmem;
+        }
+
+      newmem = mm_malloc(heap, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, mempool_multiple_alloc_size(oldmem));
+          mempool_multiple_free(&heap->mm_mpool, oldmem);
+        }
+
+      return newmem;
+    }
+  else
+    {
+      newmem = mempool_multiple_alloc(&heap->mm_mpool, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, MIN(size, mm_malloc_size(oldmem)));
+          mm_free(heap, oldmem);
+          return newmem;
+        }
+    }
+#endif
+
   /* Adjust the size to account for (1) the size of the allocated node and
    * (2) to make sure that it is an even multiple of our granule size.
    */
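
The realloc() path above has to handle a block migrating between the
two allocators in either direction. The following condensed sketch
restates that decision tree with hypothetical helper names; as a
defensive simplification it bounds every copy with MIN() of the two
usable sizes, and it omits the fall-through to the normal heap realloc
that the real code takes when the pool cannot satisfy the request:

  #include <stddef.h>
  #include <string.h>

  #define MIN(x, y) ((x) < (y) ? (x) : (y))

  /* Hypothetical helpers standing in for the mempool/heap calls */

  extern int    from_pool(void *mem);
  extern void  *pool_try_realloc(void *mem, size_t size);
  extern void  *pool_try_alloc(size_t size);
  extern void  *heap_alloc(size_t size);
  extern size_t usable_size(void *mem);
  extern void   release(void *mem);

  void *realloc_sketch(void *oldmem, size_t size)
  {
    void *newmem;

    if (from_pool(oldmem))
      {
        /* Try to stay inside the pools first */

        newmem = pool_try_realloc(oldmem, size);
        if (newmem != NULL)
          {
            return newmem;
          }

        /* Otherwise migrate pool -> heap */

        newmem = heap_alloc(size);
      }
    else
      {
        /* A heap block may fit a pool at its new, smaller size */

        newmem = pool_try_alloc(size);
      }

    if (newmem != NULL)
      {
        memcpy(newmem, oldmem, MIN(size, usable_size(oldmem)));
        release(oldmem);
      }

    return newmem;
  }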


[nuttx] 03/04: fix: mmsize_t needs to support 64-bit

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit bdcb1f6a25959bab54b0599a5703d5f0110f4d7b
Author: anjiahao <an...@xiaomi.com>
AuthorDate: Wed Nov 16 14:22:19 2022 +0800

    fix: mmsize_t needs to support 64-bit
    
    Signed-off-by: anjiahao <an...@xiaomi.com>
---
 mm/mm_heap/mm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 4bb919ac1e..10ee0596ac 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -148,7 +148,7 @@
 #ifdef CONFIG_MM_SMALL
 typedef uint16_t mmsize_t;
 #else
-typedef uint32_t mmsize_t;
+typedef size_t mmsize_t;
 #endif
 
 /* This describes an allocated chunk.  An allocated chunk is
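
The change matters on 64-bit targets: there size_t is 8 bytes, so a
uint32_t mmsize_t would truncate large chunk sizes, and code such as
MM_IS_FROM_MEMPOOL(), which steps back one mmsize_t from the user
pointer, would read only part of the preceding tag word. A sketch of a
compile-time guard for the requirement (NuttX itself does not carry
this assert):

  #include <assert.h>
  #include <stddef.h>

  typedef size_t mmsize_t;  /* the fixed non-CONFIG_MM_SMALL definition */

  static_assert(sizeof(mmsize_t) == sizeof(size_t),
                "mmsize_t must be able to hold any size_t value");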


[nuttx] 04/04: mm/tlsf: add mempool to optimize small block performance

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit cb404167a73b05230523a42e12f3b54a59053abe
Author: anjiahao <an...@xiaomi.com>
AuthorDate: Mon Nov 7 21:55:13 2022 +0800

    mm/tlsf: add mempool to optimize small block performance
    
    Signed-off-by: anjiahao <an...@xiaomi.com>
---
 mm/tlsf/mm_tlsf.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 97 insertions(+), 1 deletion(-)

diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index a4a41b130f..c2cb433518 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -38,6 +38,7 @@
 #include <nuttx/fs/procfs.h>
 #include <nuttx/mutex.h>
 #include <nuttx/mm/mm.h>
+#include <nuttx/mm/mempool.h>
 
 #include "tlsf/tlsf.h"
 #include "kasan/kasan.h"
@@ -52,6 +53,14 @@
 #  define MM_PTR_FMT_WIDTH 19
 #endif
 
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+#  define MM_MPOOL_BIT (1 << 0)
+#  define MM_IS_FROM_MEMPOOL(mem) \
+          ((*((FAR size_t *)(mem) - 1)) & MM_MPOOL_BIT) == 0
+#endif
+
 /****************************************************************************
  * Private Types
  ****************************************************************************/
@@ -84,6 +93,14 @@ struct mm_heap_s
 
   tlsf_t mm_tlsf; /* The tlsf context */
 
+  /* This is the multiple mempool of the heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  struct mempool_multiple_s mm_mpool;
+  struct mempool_s mm_pools[CONFIG_MM_HEAP_MEMPOOL_THRESHOLD /
+                            sizeof(uintptr_t)];
+#endif
+
   /* Free delay list, for situations where free can't run immediately */
 
 #ifdef CONFIG_SMP
@@ -617,6 +634,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
       return;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      mempool_multiple_free(&heap->mm_mpool, mem);
+      return;
+    }
+#endif
+
   if (mm_lock(heap) == 0)
     {
       kasan_poison(mem, mm_malloc_size(mem));
@@ -714,6 +739,9 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
                                     FAR void *heapstart, size_t heapsize)
 {
   FAR struct mm_heap_s *heap;
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  int i;
+#endif
 
   minfo("Heap: name=%s start=%p size=%zu\n", name, heapstart, heapsize);
 
@@ -725,6 +753,20 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
   heapstart += sizeof(struct mm_heap_s);
   heapsize -= sizeof(struct mm_heap_s);
 
+  /* Initialize the multiple mempool in heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  heap->mm_mpool.pools = heap->mm_pools;
+  heap->mm_mpool.npools = sizeof(heap->mm_pools) / sizeof(heap->mm_pools[0]);
+  for (i = 0; i < heap->mm_mpool.npools; i++)
+    {
+      heap->mm_pools[i].blocksize = (i + 1) * sizeof(uintptr_t);
+      heap->mm_pools[i].expandsize = CONFIG_MM_HEAP_MEMPOOL_EXPAND;
+    }
+
+  mempool_multiple_init(&heap->mm_mpool, name);
+#endif
+
   /* Allocate and create TLSF context */
 
   DEBUGASSERT(heapsize > tlsf_size());
@@ -887,6 +929,13 @@ void mm_memdump(FAR struct mm_heap_s *heap, pid_t pid)
 
 size_t mm_malloc_size(FAR void *mem)
 {
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      return mempool_multiple_alloc_size(mem);
+    }
+#endif
+
 #if CONFIG_MM_BACKTRACE >= 0
   return tlsf_block_size(mem) - sizeof(struct memdump_backtrace_s);
 #else
@@ -909,6 +958,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 {
   FAR void *ret;
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_alloc(&heap->mm_mpool, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Free the delay list first */
 
   free_delaylist(heap);
@@ -956,6 +1013,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
 {
   FAR void *ret;
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_memalign(&heap->mm_mpool, alignment, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Free the delay list first */
 
   free_delaylist(heap);
@@ -1012,7 +1077,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
 {
   FAR void *newmem;
 
-#ifdef CONFIG_MM_KASAN
   if (oldmem == NULL)
     {
       return mm_malloc(heap, size);
@@ -1024,6 +1088,38 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(oldmem))
+    {
+      newmem = mempool_multiple_realloc(&heap->mm_mpool, oldmem, size);
+      if (newmem != NULL)
+        {
+          return newmem;
+        }
+
+      newmem = mm_malloc(heap, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, size);
+          mempool_multiple_free(&heap->mm_mpool, oldmem);
+        }
+
+      return newmem;
+    }
+  else
+    {
+      newmem = mempool_multiple_alloc(&heap->mm_mpool, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, MIN(size, mm_malloc_size(oldmem)));
+          mm_free(heap, oldmem);
+          return newmem;
+        }
+    }
+#endif
+
+#ifdef CONFIG_MM_KASAN
+
   newmem = mm_malloc(heap, size);
   if (newmem)
     {
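
With CONFIG_MM_HEAP_MEMPOOL_THRESHOLD enabled, the routing is invisible
to callers: the standard allocation API is unchanged, and only the size
of the request decides which back end serves it. A usage sketch
(threshold value and sizes are illustrative):

  #include <stdlib.h>

  int main(void)
  {
    /* Assuming CONFIG_MM_HEAP_MEMPOOL_THRESHOLD=64 */

    void *small = malloc(24);    /* below threshold: multiple mempool  */
    void *large = malloc(4096);  /* above threshold: regular TLSF heap */

    free(small);                 /* routed back via MM_IS_FROM_MEMPOOL() */
    free(large);

    return 0;
  }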


[nuttx] 01/04: mm/mm_heap: remove kasan in MM_ADD_BACKTRACE

Posted by xi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 7cd325f3be58966823dbe545eaf239ea1646db25
Author: dongjiuzhu1 <do...@xiaomi.com>
AuthorDate: Fri Oct 28 22:51:30 2022 +0800

    mm/mm_heap: remove kasan in MM_ADD_BACKTRACE
    
    Do a simple copy instead of the memset and memcpy operations,
    because those functions are instrumented by KASan; if they touch
    the poisoned area, the system will crash.
    
    Signed-off-by: dongjiuzhu1 <do...@xiaomi.com>
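
The reason a plain loop helps: under KASan, memcpy() and memset() are
themselves instrumented, so they validate every byte against shadow
memory and abort on poisoned bytes even when called from a function
marked nosanitize_address, because the check lives inside memcpy. A
plain loop in a nosanitize_address function compiles to unchecked loads
and stores. A sketch of the pattern (the fallback macro definition is
an assumption for GCC/Clang):

  #ifndef nosanitize_address
  #  define nosanitize_address __attribute__((no_sanitize_address))
  #endif

  nosanitize_address
  static void copy_frames(void **dst, void **src, int n)
  {
    int i;

    /* Element-wise copy; no instrumented libc call is involved */

    for (i = 0; i < n; i++)
      {
        dst[i] = src[i];
      }
  }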
---
 arch/sim/src/sim/sim_backtrace.c | 7 ++++++-
 mm/mm_heap/mm.h                  | 6 ++----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/sim/src/sim/sim_backtrace.c b/arch/sim/src/sim/sim_backtrace.c
index e084041db0..3ebeedac8a 100644
--- a/arch/sim/src/sim/sim_backtrace.c
+++ b/arch/sim/src/sim/sim_backtrace.c
@@ -33,10 +33,12 @@
  * Public Functions
  ****************************************************************************/
 
+nosanitize_address
 int up_backtrace(struct tcb_s *tcb, void **buffer, int size, int skip)
 {
   void *buf[skip + size];
   int ret = 0;
+  int i;
 
   if (tcb == running_task())
     {
@@ -49,7 +51,10 @@ int up_backtrace(struct tcb_s *tcb, void **buffer, int size, int skip)
     }
 
   ret -= skip;
-  memcpy(buffer, &buf[skip], ret * sizeof(void *));
+  for (i = 0; i < ret; i++)
+    {
+      buffer[i] = buf[skip + i];
+    }
 
   return ret;
 }
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 4a980eccd8..23f5795bc3 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -82,23 +82,21 @@
      do \
        { \
          FAR struct mm_allocnode_s *tmp = (FAR struct mm_allocnode_s *)(ptr); \
-         kasan_unpoison(tmp, SIZEOF_MM_ALLOCNODE); \
          FAR struct tcb_s *tcb; \
          tmp->pid = gettid(); \
          tcb = nxsched_get_tcb(tmp->pid); \
          if ((heap)->mm_procfs.backtrace || (tcb && tcb->flags & TCB_FLAG_HEAP_DUMP)) \
            { \
              int n = backtrace(tmp->backtrace, CONFIG_MM_BACKTRACE); \
-             if (n < CONFIG_MM_BACKTRACE) \
+             while (n < CONFIG_MM_BACKTRACE) \
                { \
-                 tmp->backtrace[n] = 0; \
+                 tmp->backtrace[n++] = NULL; \
                } \
            } \
          else \
            { \
              tmp->backtrace[0] = 0; \
            } \
-         kasan_poison(tmp, SIZEOF_MM_ALLOCNODE); \
        } \
      while (0)
 #else