Posted to commits@nuttx.apache.org by pk...@apache.org on 2022/11/07 09:02:01 UTC

[incubator-nuttx] 02/03: mm: Restore the return type of mm_lock from bool to int

This is an automated email from the ASF dual-hosted git repository.

pkarashchenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit b567e09c3c54ac93cd3efacf86a0bc77211a773c
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Sun Nov 6 06:44:47 2022 +0800

    mm: Restore the return type of mm_lock from bool to int
    
    Fix the issue reported here better:
    https://github.com/apache/incubator-nuttx/pull/6995
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
---
 mm/mm_heap/mm.h         |  2 +-
 mm/mm_heap/mm_foreach.c |  2 +-
 mm/mm_heap/mm_free.c    |  2 +-
 mm/mm_heap/mm_lock.c    | 12 ++++++------
 4 files changed, 9 insertions(+), 9 deletions(-)
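
For context, the practical effect of this change on callers: code that
previously tested a bool now tests for a negative errno. A minimal sketch of
the old and new convention (the heap pointer and error handling here are
illustrative only; the real call sites are in the hunks below):

    /* Before this patch: bool return, true on success */

    if (!mm_lock(heap))
      {
        return;
      }

    /* After this patch: int return, 0 on success, negative errno on failure */

    if (mm_lock(heap) < 0)
      {
        return;
      }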

diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 5cb0205148..36a908770f 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -240,7 +240,7 @@ typedef CODE void (*mmchunk_handler_t)(FAR struct mm_allocnode_s *node,
 
 /* Functions contained in mm_lock.c *****************************************/
 
-bool mm_lock(FAR struct mm_heap_s *heap);
+int mm_lock(FAR struct mm_heap_s *heap);
 void mm_unlock(FAR struct mm_heap_s *heap);
 
 /* Functions contained in mm_shrinkchunk.c **********************************/
diff --git a/mm/mm_heap/mm_foreach.c b/mm/mm_heap/mm_foreach.c
index 9c86fd2dbb..b1c8982da6 100644
--- a/mm/mm_heap/mm_foreach.c
+++ b/mm/mm_heap/mm_foreach.c
@@ -68,7 +68,7 @@ void mm_foreach(FAR struct mm_heap_s *heap, mmchunk_handler_t handler,
        * Retake the mutex for each region to reduce latencies
        */
 
-      if (!mm_lock(heap))
+      if (mm_lock(heap) < 0)
         {
           return;
         }
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index d7d75d574b..6002b4dd4f 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -84,7 +84,7 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
       return;
     }
 
-  if (mm_lock(heap) == false)
+  if (mm_lock(heap) < 0)
     {
       /* Meet -ESRCH return, which means we are in situations
        * during context switching(See mm_lock() & getpid()).
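
The -ESRCH case referenced in the comment above comes from the getpid() check
shown in the mm_lock.c hunk below: while a context switch is in progress there
is no valid task context, so the heap mutex cannot be taken. A hedged sketch of
how a caller could react to that code (defer_free() is a hypothetical helper,
not part of this patch):

    int ret = mm_lock(heap);
    if (ret == -ESRCH)
      {
        /* No valid task context yet; defer the free rather than blocking.
         * defer_free() is made up here for illustration only.
         */

        defer_free(heap, mem);
        return;
      }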
diff --git a/mm/mm_heap/mm_lock.c b/mm/mm_heap/mm_lock.c
index b77ad77a0d..92ab126c90 100644
--- a/mm/mm_heap/mm_lock.c
+++ b/mm/mm_heap/mm_lock.c
@@ -51,11 +51,11 @@
  *   heap  - heap instance want to take mutex
  *
  * Returned Value:
- *   true if the lock can be taken, otherwise false.
+ *   0 if the lock can be taken, otherwise negative errno.
  *
  ****************************************************************************/
 
-bool mm_lock(FAR struct mm_heap_s *heap)
+int mm_lock(FAR struct mm_heap_s *heap)
 {
 #if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
   /* Check current environment */
@@ -67,11 +67,11 @@ bool mm_lock(FAR struct mm_heap_s *heap)
        * Or, touch the heap internal data directly.
        */
 
-      return !nxmutex_is_locked(&heap->mm_lock);
+      return nxmutex_is_locked(&heap->mm_lock) ? -EAGAIN : 0;
 #else
       /* Can't take mutex in SMP interrupt handler */
 
-      return false;
+      return -EAGAIN;
 #endif
     }
   else
@@ -89,11 +89,11 @@ bool mm_lock(FAR struct mm_heap_s *heap)
 
   if (getpid() < 0)
     {
-      return false;
+      return -ESRCH;
     }
   else
     {
-      return nxmutex_lock(&heap->mm_lock) >= 0;
+      return nxmutex_lock(&heap->mm_lock);
     }
 }
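
Taken together, a hypothetical caller written against the restored int
signature might look like the following (my_heap_op() is a made-up name; the
error-code meanings follow the hunks above):

    static int my_heap_op(FAR struct mm_heap_s *heap)
    {
      int ret = mm_lock(heap);
      if (ret < 0)
        {
          /* -EAGAIN: the mutex cannot be taken in an interrupt handler.
           * -ESRCH:  getpid() failed because a context switch is in progress.
           */

          return ret;
        }

      /* ... operate on the heap while holding the mutex ... */

      mm_unlock(heap);
      return OK;
    }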