Posted to commits@nuttx.apache.org by xi...@apache.org on 2023/01/16 12:32:28 UTC

[nuttx] 04/04: mm/tlsf: add mempool to optimize small block performance

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit cb404167a73b05230523a42e12f3b54a59053abe
Author: anjiahao <an...@xiaomi.com>
AuthorDate: Mon Nov 7 21:55:13 2022 +0800

    mm/tlsf: add mempool to optimize small block performance
    
    Signed-off-by: anjiahao <an...@xiaomi.com>
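
The commit message is terse, so a quick summary of what the diff below does: allocations up to roughly CONFIG_MM_HEAP_MEMPOOL_THRESHOLD bytes are served from an array of fixed-size pools, one pool per multiple of sizeof(uintptr_t), and only larger requests or pool misses fall through to the TLSF heap. On free, the word stored just before the user pointer carries a tag (MM_MPOOL_BIT) that tells mm_free() which allocator owns the block. The sketch below is a minimal, self-contained illustration of that dispatch pattern; it is NOT the NuttX mempool API, every name in it (demo_malloc, struct pool, POOL_BIT, THRESHOLD) is invented for the example, and it uses its own tag convention (bit set means the block came from a pool).

/* small-block pool demo: malloc()/free() stand in for the TLSF heap, and
 * a size_t header written before every block carries a tag bit so that
 * demo_free() knows which side owns the block.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THRESHOLD 64                               /* small-block limit */
#define NPOOLS    (THRESHOLD / sizeof(uintptr_t))  /* one pool per size */
#define POOL_BIT  1                                /* header tag bit */

struct block
{
  struct block *next;            /* freelist link, reuses the block memory */
};

struct pool
{
  struct block *freelist;        /* recycled blocks of this size */
};

static struct pool g_pools[NPOOLS];

/* Write the header word and return the user pointer just after it */

static void *tag_block(void *raw, size_t word)
{
  *(size_t *)raw = word;
  return (char *)raw + sizeof(size_t);
}

void *demo_malloc(size_t size)
{
  void *raw;

  if (size > 0 && size <= THRESHOLD)
    {
      /* Round up to a multiple of sizeof(uintptr_t) and pick that pool */

      size_t idx = (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t) - 1;
      struct pool *pool = &g_pools[idx];

      if (pool->freelist != NULL)
        {
          struct block *blk = pool->freelist;
          pool->freelist = blk->next;
          return tag_block(blk, (idx << 1) | POOL_BIT);
        }

      /* Pool is empty: carve one fresh block (a real implementation would
       * expand the pool in larger chunks, cf. CONFIG_MM_HEAP_MEMPOOL_EXPAND)
       */

      raw = malloc(sizeof(size_t) + (idx + 1) * sizeof(uintptr_t));
      return raw ? tag_block(raw, (idx << 1) | POOL_BIT) : NULL;
    }

  /* Large request: go straight to the general-purpose heap */

  raw = malloc(sizeof(size_t) + size);
  return raw ? tag_block(raw, size << 1) : NULL;   /* tag bit clear */
}

void demo_free(void *mem)
{
  size_t word = *((size_t *)mem - 1);

  if (word & POOL_BIT)
    {
      /* Small block: push it back on its pool's freelist so the next
       * same-sized allocation is O(1)
       */

      struct pool *pool = &g_pools[word >> 1];
      struct block *blk = (struct block *)((size_t *)mem - 1);

      blk->next = pool->freelist;
      pool->freelist = blk;
    }
  else
    {
      free((size_t *)mem - 1);   /* large block: back to the heap */
    }
}

int main(void)
{
  void *a = demo_malloc(24);     /* served by the 24-byte pool */
  void *b = demo_malloc(200);    /* served by the general heap */

  demo_free(a);
  demo_free(b);

  printf("recycled: %p\n", demo_malloc(24));  /* comes off the freelist */
  return 0;
}

Running it shows the 24-byte block being handed out again immediately after it is freed, which is the kind of fast path this commit puts in front of the TLSF calls.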
---
 mm/tlsf/mm_tlsf.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 97 insertions(+), 1 deletion(-)

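realloc is the trickiest path, because a block may have to migrate between the pool side and the TLSF side (for example, a small pool block growing past the threshold). The new mm_realloc() code below handles that by allocating from the other side, copying the payload across, and freeing the old block. Continuing the toy example above (it reuses demo_malloc(), demo_free() and the POOL_BIT header from that sketch, and simplifies the real logic to a single allocate-copy-free sequence):

#include <string.h>

#define DEMO_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Recover the usable size from the header written by demo_malloc() */

static size_t demo_size(void *mem)
{
  size_t word = *((size_t *)mem - 1);

  return (word & POOL_BIT) ? ((word >> 1) + 1) * sizeof(uintptr_t)
                           : (word >> 1);
}

void *demo_realloc(void *mem, size_t size)
{
  void *newmem;

  if (mem == NULL)
    {
      return demo_malloc(size);
    }

  if (size == 0)
    {
      demo_free(mem);
      return NULL;
    }

  /* Let the front-end pick pool vs. heap for the new size, copy the
   * overlapping bytes, then hand the old block back to whichever side
   * owned it
   */

  newmem = demo_malloc(size);
  if (newmem != NULL)
    {
      memcpy(newmem, mem, DEMO_MIN(size, demo_size(mem)));
      demo_free(mem);
    }

  return newmem;
}
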
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index a4a41b130f..c2cb433518 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -38,6 +38,7 @@
 #include <nuttx/fs/procfs.h>
 #include <nuttx/mutex.h>
 #include <nuttx/mm/mm.h>
+#include <nuttx/mm/mempool.h>
 
 #include "tlsf/tlsf.h"
 #include "kasan/kasan.h"
@@ -52,6 +53,14 @@
 #  define MM_PTR_FMT_WIDTH 19
 #endif
 
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+#  define MM_MPOOL_BIT (1 << 0)
+#  define MM_IS_FROM_MEMPOOL(mem) \
+          ((*((FAR size_t *)(mem) - 1)) & MM_MPOOL_BIT) == 0
+#endif
+
 /****************************************************************************
  * Private Types
  ****************************************************************************/
@@ -84,6 +93,14 @@ struct mm_heap_s
 
   tlsf_t mm_tlsf; /* The tlsf context */
 
+  /* This is the multiple mempool of the heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  struct mempool_multiple_s mm_mpool;
+  struct mempool_s mm_pools[CONFIG_MM_HEAP_MEMPOOL_THRESHOLD /
+                            sizeof(uintptr_t)];
+#endif
+
   /* Free delay list, for cases where free can't be done immediately */
 
 #ifdef CONFIG_SMP
@@ -617,6 +634,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
       return;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      mempool_multiple_free(&heap->mm_mpool, mem);
+      return;
+    }
+#endif
+
   if (mm_lock(heap) == 0)
     {
       kasan_poison(mem, mm_malloc_size(mem));
@@ -714,6 +739,9 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
                                     FAR void *heapstart, size_t heapsize)
 {
   FAR struct mm_heap_s *heap;
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  int i;
+#endif
 
   minfo("Heap: name=%s start=%p size=%zu\n", name, heapstart, heapsize);
 
@@ -725,6 +753,20 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
   heapstart += sizeof(struct mm_heap_s);
   heapsize -= sizeof(struct mm_heap_s);
 
+  /* Initialize the multiple mempool in the heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  heap->mm_mpool.pools = heap->mm_pools;
+  heap->mm_mpool.npools = sizeof(heap->mm_pools) / sizeof(heap->mm_pools[0]);
+  for (i = 0; i < heap->mm_mpool.npools; i++)
+    {
+      heap->mm_pools[i].blocksize = (i + 1) * sizeof(uintptr_t);
+      heap->mm_pools[i].expandsize = CONFIG_MM_HEAP_MEMPOOL_EXPAND;
+    }
+
+  mempool_multiple_init(&heap->mm_mpool, name);
+#endif
+
   /* Allocate and create TLSF context */
 
   DEBUGASSERT(heapsize > tlsf_size());
@@ -887,6 +929,13 @@ void mm_memdump(FAR struct mm_heap_s *heap, pid_t pid)
 
 size_t mm_malloc_size(FAR void *mem)
 {
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      return mempool_multiple_alloc_size(mem);
+    }
+#endif
+
 #if CONFIG_MM_BACKTRACE >= 0
   return tlsf_block_size(mem) - sizeof(struct memdump_backtrace_s);
 #else
@@ -909,6 +958,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 {
   FAR void *ret;
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_alloc(&heap->mm_mpool, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Free the delay list first */
 
   free_delaylist(heap);
@@ -956,6 +1013,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
 {
   FAR void *ret;
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_memalign(&heap->mm_mpool, alignment, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Free the delay list first */
 
   free_delaylist(heap);
@@ -1012,7 +1077,6 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
 {
   FAR void *newmem;
 
-#ifdef CONFIG_MM_KASAN
   if (oldmem == NULL)
     {
       return mm_malloc(heap, size);
@@ -1024,6 +1088,38 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(oldmem))
+    {
+      newmem = mempool_multiple_realloc(&heap->mm_mpool, oldmem, size);
+      if (newmem != NULL)
+        {
+          return newmem;
+        }
+
+      newmem = mm_malloc(heap, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, size);
+          mempool_multiple_free(&heap->mm_mpool, oldmem);
+        }
+
+      return newmem;
+    }
+  else
+    {
+      newmem = mempool_multiple_alloc(&heap->mm_mpool, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, MIN(size, mm_malloc_size(oldmem)));
+          mm_free(heap, oldmem);
+          return newmem;
+        }
+    }
+#endif
+
+#ifdef CONFIG_MM_KASAN
+
   newmem = mm_malloc(heap, size);
   if (newmem)
     {