Posted to commits@nuttx.apache.org by xi...@apache.org on 2023/01/16 12:32:26 UTC

[nuttx] 02/04: mm/mm_heap: add mempool to optimize small block performance

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit c82f44c4f3fca2fc08678edce223b434f819ead1
Author: dongjiuzhu1 <do...@xiaomi.com>
AuthorDate: Sun Oct 30 11:34:24 2022 +0800

    mm/mm_heap: add mempool to optimize small block performance
    
    There are many small memory blocks in the NuttX system, e.g. struct
    tcb_s, struct inode, etc., and they have several disadvantages:
    1. Their frequent allocation and freeing cause system memory
    fragmentation.
    2. Since each memory block carries a per-block overhead, the
    utilization of small memory blocks is relatively low, which wastes
    memory.

    So we can use a mempool to allocate small blocks, improving
    allocation speed and utilization and reducing fragmentation.
    
    Signed-off-by: dongjiuzhu1 <do...@xiaomi.com>
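
For context, the "multiple mempool" is an array of fixed-size pools whose
block sizes step up in multiples of sizeof(uintptr_t); an allocation is
served from the smallest pool whose block size fits the request. A minimal
sketch of that size-to-pool mapping, assuming the pool geometry set up in
mm_initialize.c below (the helper pick_pool and struct pool_sketch are
hypothetical, for illustration only):

    #include <stddef.h>
    #include <stdint.h>

    struct pool_sketch
    {
      size_t blocksize;   /* Fixed block size served by this pool */
    };

    /* Return the smallest pool whose blocksize covers the request, or
     * NULL when the request exceeds the threshold and must go to the
     * regular heap.  Pool i serves blocks of (i + 1) * sizeof(uintptr_t)
     * bytes, so rounding the size up to the next multiple of
     * sizeof(uintptr_t) yields the pool index directly.
     */

    static struct pool_sketch *pick_pool(struct pool_sketch *pools,
                                         size_t npools, size_t size)
    {
      size_t i = (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);

      if (i == 0 || i > npools)
        {
          return NULL;
        }

      return &pools[i - 1];
    }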
---
 mm/Kconfig                  | 16 ++++++++++++++++
 mm/mm_heap/mm.h             | 14 ++++++++++++++
 mm/mm_heap/mm_free.c        |  8 ++++++++
 mm/mm_heap/mm_initialize.c  | 14 ++++++++++++++
 mm/mm_heap/mm_malloc.c      |  8 ++++++++
 mm/mm_heap/mm_malloc_size.c |  7 +++++++
 mm/mm_heap/mm_memalign.c    | 10 +++++++++-
 mm/mm_heap/mm_realloc.c     | 36 ++++++++++++++++++++++++++++++++++++
 8 files changed, 112 insertions(+), 1 deletion(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index d9da188d56..1ef8a1305f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -172,6 +172,22 @@ config MM_SHM
 		Build in support for the shared memory interfaces shmget(), shmat(),
 		shmctl(), and shmdt().
 
+config MM_HEAP_MEMPOOL_THRESHOLD
+	int "Threshold size for serving small requests from the multiple mempool"
+	default 0
+	---help---
+		If the size of the memory requested by the user is less
+		than this threshold, the memory will be allocated from the
+		multiple mempool by default.
+
+config MM_HEAP_MEMPOOL_EXPAND
+	int "The expansion size for each pool in the multiple mempool"
+	default 1024
+	depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
+	---help---
+		This is the size by which each memory pool in the multiple
+		mempool is expanded when it runs out of free blocks.
+
 config FS_PROCFS_EXCLUDE_MEMPOOL
 	bool "Exclude mempool"
 	default DEFAULT_SMALL
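
To make the two options concrete: the threshold fixes how many pools exist
(it is the dimension of mm_pools[] in mm.h below), and the expand value is
how much a pool grows by when it runs out of blocks. A worked example with
hypothetical values, assuming a 32-bit target where sizeof(uintptr_t) == 4:

    /* Hypothetical configuration, for illustration only */

    #define CONFIG_MM_HEAP_MEMPOOL_THRESHOLD 64
    #define CONFIG_MM_HEAP_MEMPOOL_EXPAND    1024

    /* Mirrors the mm_pools[] dimension in mm/mm_heap/mm.h:
     * 64 / 4 = 16 pools with block sizes 4, 8, 12, ..., 64 bytes.
     * Requests above 64 bytes bypass the pools and take the normal
     * heap path; a pool that runs dry expands by 1024 at a time.
     */

    #define NPOOLS (CONFIG_MM_HEAP_MEMPOOL_THRESHOLD / sizeof(uintptr_t))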
diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 23f5795bc3..4bb919ac1e 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -31,6 +31,7 @@
 #include <nuttx/sched.h>
 #include <nuttx/fs/procfs.h>
 #include <nuttx/lib/math32.h>
+#include <nuttx/mm/mempool.h>
 
 #include <assert.h>
 #include <execinfo.h>
@@ -133,6 +134,11 @@
 
 #define SIZEOF_MM_FREENODE sizeof(struct mm_freenode_s)
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+#  define MM_IS_FROM_MEMPOOL(mem) \
+  ((*((FAR mmsize_t *)mem - 1) & MM_ALLOC_BIT) == 0)
+#endif
+
 /****************************************************************************
  * Public Types
  ****************************************************************************/
@@ -225,6 +231,14 @@ struct mm_heap_s
 
   FAR struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
 
+  /* This is the multiple mempool of the heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  struct mempool_multiple_s mm_mpool;
+  struct mempool_s mm_pools[CONFIG_MM_HEAP_MEMPOOL_THRESHOLD /
+                            sizeof(uintptr_t)];
+#endif
+
 #if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
   struct procfs_meminfo_entry_s mm_procfs;
 #endif
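
The MM_IS_FROM_MEMPOOL() test above keys off the word that sits
immediately before the user pointer: a chunk handed out by the regular
heap carries its size there with MM_ALLOC_BIT set, while a mempool block
is laid out so that the bit is clear, which lets free() and realloc()
route a bare pointer to the allocator that owns it. The same check in
isolation, assuming the mmsize_t and MM_ALLOC_BIT definitions from mm.h:

    #include <stdbool.h>

    static bool from_mempool(FAR void *mem)
    {
      /* Read the size/tag word just before the user pointer */

      mmsize_t tag = *((FAR mmsize_t *)mem - 1);

      /* Heap chunks always have MM_ALLOC_BIT set here */

      return (tag & MM_ALLOC_BIT) == 0;
    }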
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index 66f3707bda..c8510d9a04 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -82,6 +82,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
       return;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      mempool_multiple_free(&heap->mm_mpool, mem);
+      return;
+    }
+#endif
+
   if (mm_lock(heap) < 0)
     {
       /* Meet -ESRCH return, which means we are in situations
diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index c10f7cde7b..c0fe89d27b 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -226,6 +226,20 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
 #  endif
 #endif
 
+  /* Initialize the multiple mempool in heap */
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  heap->mm_mpool.pools = heap->mm_pools;
+  heap->mm_mpool.npools = sizeof(heap->mm_pools) / sizeof(heap->mm_pools[0]);
+  for (i = 0; i < heap->mm_mpool.npools; i++)
+    {
+      heap->mm_pools[i].blocksize = (i + 1) * sizeof(uintptr_t);
+      heap->mm_pools[i].expandsize = CONFIG_MM_HEAP_MEMPOOL_EXPAND;
+    }
+
+  mempool_multiple_init(&heap->mm_mpool, name);
+#endif
+
   /* Add the initial region of memory to the heap */
 
   mm_addregion(heap, heapstart, heapsize);
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index 303fef3e35..389321273a 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -120,6 +120,14 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  ret = mempool_multiple_alloc(&heap->mm_mpool, size);
+  if (ret != NULL)
+    {
+      return ret;
+    }
+#endif
+
   /* Adjust the size to account for (1) the size of the allocated node and
    * (2) to make sure that it is an even multiple of our granule size.
    */
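
The change here is a pure fast path: if mempool_multiple_alloc() cannot
serve the request (presumably because the size exceeds the threshold or
the pools cannot expand), it returns NULL and control falls through to
the unchanged heap allocator, so callers see no behavioral difference.
A usage sketch with arbitrary sizes:

    FAR void *small = mm_malloc(heap, 16);    /* pool-backed when 16 is
                                               * within the threshold  */
    FAR void *large = mm_malloc(heap, 4096);  /* above the threshold:
                                               * normal heap chunk     */

    mm_free(heap, small);   /* routed back to its pool automatically */
    mm_free(heap, large);   /* normal heap free path                 */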
diff --git a/mm/mm_heap/mm_malloc_size.c b/mm/mm_heap/mm_malloc_size.c
index 98ffde645a..bb766305db 100644
--- a/mm/mm_heap/mm_malloc_size.c
+++ b/mm/mm_heap/mm_malloc_size.c
@@ -46,6 +46,13 @@ size_t mm_malloc_size(FAR void *mem)
       return 0;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(mem))
+    {
+      return mempool_multiple_alloc_size(mem);
+    }
+#endif
+
   /* Map the memory chunk into a free node */
 
   node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index 45b61ee6b6..d8e30942b7 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -43,7 +43,7 @@
  *   within that chunk that meets the alignment request and then frees any
  *   leading or trailing space.
  *
- *   The alignment argument must be a power of two.  8-byte alignment is
+ *   The alignment argument must be a power of two. 16-byte alignment is
  *   guaranteed by normal malloc calls.
  *
  ****************************************************************************/
@@ -72,6 +72,14 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  node = mempool_multiple_memalign(&heap->mm_mpool, alignment, size);
+  if (node != NULL)
+    {
+      return node;
+    }
+#endif
+
   /* If this requested alignment is less than or equal to the natural
    * alignment of malloc, then just let malloc do the work.
    */
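
As with mm_malloc(), the pool is simply tried first:
mempool_multiple_memalign() is expected to return NULL when no pool block
can satisfy the requested alignment, leaving the existing heap alignment
logic below as the fallback. A usage sketch (the 64/32 values are
arbitrary):

    FAR void *buf = mm_memalign(heap, 64, 32);

    DEBUGASSERT(((uintptr_t)buf & 63) == 0);  /* alignment still holds,
                                               * pool-backed or not    */
    mm_free(heap, buf);                       /* routing is automatic  */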
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index c928a46416..efb4b8c3d2 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -34,6 +34,12 @@
 #include "mm_heap/mm.h"
 #include "kasan/kasan.h"
 
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -88,6 +94,36 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       return NULL;
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  if (MM_IS_FROM_MEMPOOL(oldmem))
+    {
+      newmem = mempool_multiple_realloc(&heap->mm_mpool, oldmem, size);
+      if (newmem != NULL)
+        {
+          return newmem;
+        }
+
+      newmem = mm_malloc(heap, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, mempool_multiple_alloc_size(oldmem));
+          mempool_multiple_free(&heap->mm_mpool, oldmem);
+        }
+
+      return newmem;
+    }
+  else
+    {
+      newmem = mempool_multiple_alloc(&heap->mm_mpool, size);
+      if (newmem != NULL)
+        {
+          memcpy(newmem, oldmem, MIN(size, mm_malloc_size(oldmem)));
+          mm_free(heap, oldmem);
+          return newmem;
+        }
+    }
+#endif
+
   /* Adjust the size to account for (1) the size of the allocated node and
    * (2) to make sure that it is an even multiple of our granule size.
    */
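
The realloc hunk above covers four ownership transitions; condensed, with
the function names as in the patch:

    /* old block in pool, new size fits a pool:
     *     mempool_multiple_realloc() keeps it inside the mempool
     * old block in pool, new size too large for any pool:
     *     mm_malloc() + memcpy() + mempool_multiple_free()
     * old block in heap, new size fits a pool:
     *     mempool_multiple_alloc() + memcpy() + mm_free()
     * old block in heap, pool allocation fails or size is large:
     *     fall through to the unchanged heap realloc path that
     *     follows the hunk
     */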