Posted to commits@nuttx.apache.org by xi...@apache.org on 2022/08/01 17:44:56 UTC

[incubator-nuttx] 02/02: mm_heap/backtrace: move MM_ADD_BACKTRACE out of heap lock

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit 5db0ab1e6aa69ec2a67bde71d1862efc0de7be39
Author: chao.an <an...@xiaomi.com>
AuthorDate: Mon Aug 1 15:59:37 2022 +0800

    mm_heap/backtrace: move MM_ADD_BACKTRACE out of heap lock
    
    Move MM_ADD_BACKTRACE out of the heap lock to improve performance:
    capturing the allocation backtrace is comparatively expensive and
    does not need to run while the heap semaphore is held.
    
    Signed-off-by: chao.an <an...@xiaomi.com>
---
 mm/mm_heap/mm_malloc.c   | 2 +-
 mm/mm_heap/mm_memalign.c | 3 ++-
 mm/mm_heap/mm_realloc.c  | 9 +++++----
 3 files changed, 8 insertions(+), 6 deletions(-)
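For context, the pattern this commit applies is sketched below as a
standalone toy allocator. This is not NuttX code: toy_malloc,
record_backtrace, and g_heap_lock are hypothetical names, glibc's
backtrace() stands in for the NuttX unwinder behind MM_ADD_BACKTRACE,
and plain malloc() stands in for the free-list search. The point is
only the ordering: heap bookkeeping stays inside the critical section,
while the comparatively expensive stack capture runs after the lock is
released.

#include <execinfo.h>   /* backtrace(); glibc -- NuttX uses its own unwinder */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BT_DEPTH 16

static pthread_mutex_t g_heap_lock = PTHREAD_MUTEX_INITIALIZER;

struct alloc_record
{
  void *mem;
  void *bt[BT_DEPTH];
  int depth;
};

/* Hypothetical helper: capture the caller's stack.  This is the slow
 * step; running it inside the lock would stall every other thread
 * waiting on the heap for the duration of a stack unwind.
 */

static void record_backtrace(struct alloc_record *rec)
{
  rec->depth = backtrace(rec->bt, BT_DEPTH);
}

static void *toy_malloc(struct alloc_record *rec, size_t size)
{
  void *mem;

  pthread_mutex_lock(&g_heap_lock);

  /* Critical section: heap bookkeeping only (stands in for the
   * free-list search and node split done by mm_malloc).
   */

  mem = malloc(size);

  pthread_mutex_unlock(&g_heap_lock);

  /* Instrumentation runs unlocked.  This is safe because the chunk is
   * exclusively owned by this caller once the lock is dropped.
   */

  if (mem != NULL)
    {
      rec->mem = mem;
      record_backtrace(rec);
    }

  return mem;
}

int main(void)
{
  struct alloc_record rec;
  void *mem = toy_malloc(&rec, 64);

  if (mem != NULL)
    {
      printf("captured %d frames\n", rec.depth);
      free(mem);
    }

  return 0;
}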

diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index 72b40c9b8b..a38633bbfe 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -229,7 +229,6 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       /* Handle the case of an exact size match */
 
       node->preceding |= MM_ALLOC_BIT;
-      MM_ADD_BACKTRACE(heap, node);
       ret = (FAR void *)((FAR char *)node + SIZEOF_MM_ALLOCNODE);
     }
 
@@ -238,6 +237,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 
   if (ret)
     {
+      MM_ADD_BACKTRACE(heap, node);
       kasan_unpoison(ret, mm_malloc_size(ret));
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
       memset(ret, 0xaa, alignsize - SIZEOF_MM_ALLOCNODE);
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index d3681dd79d..47c27d61ba 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -178,7 +178,6 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
 
       newnode->size = (size_t)next - (size_t)newnode;
       newnode->preceding = precedingsize | MM_ALLOC_BIT;
-      MM_ADD_BACKTRACE(heap, newnode);
 
       /* Reduce the size of the original chunk and mark it not allocated, */
 
@@ -224,6 +223,8 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
 
   mm_givesemaphore(heap);
 
+  MM_ADD_BACKTRACE(heap, node);
+
   kasan_unpoison((FAR void *)alignedchunk,
                  mm_malloc_size((FAR void *)alignedchunk));
 
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index 2604099f8b..6331465d34 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -128,11 +128,12 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                        oldsize - oldnode->size);
         }
 
-      MM_ADD_BACKTRACE(heap, oldnode);
-
       /* Then return the original address */
 
       mm_givesemaphore(heap);
+
+      MM_ADD_BACKTRACE(heap, oldnode);
+
       return oldmem;
     }
 
@@ -334,10 +335,10 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
             }
         }
 
-      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - SIZEOF_MM_ALLOCNODE);
-
       mm_givesemaphore(heap);
 
+      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - SIZEOF_MM_ALLOCNODE);
+
       kasan_unpoison(newmem, mm_malloc_size(newmem));
       if (newmem != oldmem)
         {
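As the hunks above show, all three paths end up with the same ordering:
mutate the heap metadata under the semaphore, call mm_givesemaphore(),
and only then run MM_ADD_BACKTRACE (and, where applicable,
kasan_unpoison) on the allocated node. A reasonable reading of why this
is safe, not stated explicitly in the commit message: once the lock is
released, the node belongs exclusively to the allocating thread until
the pointer is returned, so no other thread can observe or recycle it
while its backtrace is being recorded.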