Posted to commits@nuttx.apache.org by pk...@apache.org on 2023/10/30 09:18:40 UTC

(nuttx) branch master updated (949d01be51 -> 0c805ca0a9)

This is an automated email from the ASF dual-hosted git repository.

pkarashchenko pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


    from 949d01be51 fs/inode: Change inode_unlink to static function
     new 08bae13624 mm/tlsf: Replace the critical section with spin lock
     new d5d4006c6b mm/gran: Replace the critical section with spin lock
     new 9e4a1be8d4 mm/iob: Replace the critical section with spin lock
     new 0c805ca0a9 mm: Change global spinlock to per heap

The 4 revisions listed above as "new" are entirely new to this
repository and are described in the separate messages that follow.  No
previously present ("add") revisions are included in this push.


Summary of changes:
 mm/iob/iob.h                   |  3 +++
 mm/iob/iob_add_queue.c         |  4 ++--
 mm/iob/iob_alloc.c             | 10 +++++-----
 mm/iob/iob_alloc_qentry.c      |  8 ++++----
 mm/iob/iob_free.c              |  6 +++---
 mm/iob/iob_free_qentry.c       |  5 +++--
 mm/iob/iob_free_queue_qentry.c |  8 +++++---
 mm/iob/iob_initialize.c        |  2 ++
 mm/iob/iob_remove_queue.c      |  9 +++++++--
 mm/mm_gran/mm_gran.h           |  2 ++
 mm/mm_gran/mm_grancritical.c   |  4 ++--
 mm/mm_heap/mm.h                |  1 +
 mm/mm_heap/mm_free.c           |  4 ++--
 mm/mm_heap/mm_malloc.c         |  4 ++--
 mm/tlsf/mm_tlsf.c              | 13 +++++--------
 15 files changed, 48 insertions(+), 35 deletions(-)
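
Taken together, the series replaces NuttX's global critical section
(enter_critical_section(), which serializes all CPUs behind one
system-wide IRQ lock) with spinlocks scoped to the data they actually
protect. A minimal sketch of the pattern applied throughout -- the
counter and lock below are hypothetical, not code from these commits:

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

static spinlock_t g_counter_lock = SP_UNLOCKED;
static int g_counter;

void counter_increment_before(void)
{
  /* Before: contends with every CPU for the one global IRQ lock */

  irqstate_t flags = enter_critical_section();
  g_counter++;
  leave_critical_section(flags);
}

void counter_increment_after(void)
{
  /* After: disables local interrupts, spins only on this lock */

  irqstate_t flags = spin_lock_irqsave(&g_counter_lock);
  g_counter++;
  spin_unlock_irqrestore(&g_counter_lock, flags);
}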


(nuttx) 02/04: mm/gran: Replace the critical section with spin lock

commit d5d4006c6b88f2c856d888a107f443832ecc77b1
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Sat Oct 21 17:20:02 2023 +0800

    mm/gran: Replace the critical section with spin lock
    
    Based on discussion: https://github.com/apache/nuttx/issues/10981
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
---
 mm/mm_gran/mm_gran.h         | 2 ++
 mm/mm_gran/mm_grancritical.c | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/mm_gran/mm_gran.h b/mm/mm_gran/mm_gran.h
index 3ef67916d3..5840d9682f 100644
--- a/mm/mm_gran/mm_gran.h
+++ b/mm/mm_gran/mm_gran.h
@@ -32,6 +32,7 @@
 #include <arch/types.h>
 #include <nuttx/mm/gran.h>
 #include <nuttx/mutex.h>
+#include <nuttx/spinlock.h>
 
 /****************************************************************************
  * Pre-processor Definitions
@@ -68,6 +69,7 @@ struct gran_s
   uint16_t   ngranules; /* The total number of (aligned) granules in the heap */
 #ifdef CONFIG_GRAN_INTR
   irqstate_t irqstate;  /* For exclusive access to the GAT */
+  spinlock_t lock;
 #else
   mutex_t    lock;       /* For exclusive access to the GAT */
 #endif
diff --git a/mm/mm_gran/mm_grancritical.c b/mm/mm_gran/mm_grancritical.c
index 5a99a89a6d..0b988ecd07 100644
--- a/mm/mm_gran/mm_grancritical.c
+++ b/mm/mm_gran/mm_grancritical.c
@@ -57,7 +57,7 @@
 int gran_enter_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  priv->irqstate = enter_critical_section();
+  priv->irqstate = spin_lock_irqsave(&priv->lock);
   return OK;
 #else
   return nxmutex_lock(&priv->lock);
@@ -67,7 +67,7 @@ int gran_enter_critical(FAR struct gran_s *priv)
 void gran_leave_critical(FAR struct gran_s *priv)
 {
 #ifdef CONFIG_GRAN_INTR
-  leave_critical_section(priv->irqstate);
+  spin_unlock_irqrestore(&priv->lock, priv->irqstate);
 #else
   nxmutex_unlock(&priv->lock);
 #endif
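
One detail worth noting: the saved irqstate lives in the structure
(priv->irqstate) rather than on the caller's stack, so entries into
gran_enter_critical() cannot nest on the same allocator -- the same
constraint the previous critical-section code had. A sketch of how a
granule allocation table (GAT) update is bracketed by these helpers
(the function below is illustrative, not from the commit):

#include "mm_gran.h"  /* In-tree header declaring struct gran_s */

static void gran_gat_update(FAR struct gran_s *priv)
{
  /* Under CONFIG_GRAN_INTR this is spin_lock_irqsave(&priv->lock);
   * otherwise it is nxmutex_lock(&priv->lock).
   */

  gran_enter_critical(priv);

  /* ... read-modify-write the GAT bitmaps ... */

  gran_leave_critical(priv);
}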


(nuttx) 03/04: mm/iob: Replace the critical section with spin lock

commit 9e4a1be8d482119f004549e1bb076227b1e58279
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Sat Oct 21 17:20:29 2023 +0800

    mm/iob: Replace the critical section with spin lock
    
    Based on discussion: https://github.com/apache/nuttx/issues/10981
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
---
 mm/iob/iob.h                   |  3 +++
 mm/iob/iob_add_queue.c         |  4 ++--
 mm/iob/iob_alloc.c             | 10 +++++-----
 mm/iob/iob_alloc_qentry.c      |  8 ++++----
 mm/iob/iob_free.c              |  6 +++---
 mm/iob/iob_free_qentry.c       |  5 +++--
 mm/iob/iob_free_queue_qentry.c |  8 +++++---
 mm/iob/iob_initialize.c        |  2 ++
 mm/iob/iob_remove_queue.c      |  9 +++++++--
 9 files changed, 34 insertions(+), 21 deletions(-)

diff --git a/mm/iob/iob.h b/mm/iob/iob.h
index 3181385d34..14947915de 100644
--- a/mm/iob/iob.h
+++ b/mm/iob/iob.h
@@ -30,6 +30,7 @@
 #include <debug.h>
 
 #include <nuttx/mm/iob.h>
+#include <nuttx/spinlock.h>
 #include <nuttx/semaphore.h>
 
 #ifdef CONFIG_MM_IOB
@@ -80,6 +81,8 @@ extern sem_t g_throttle_sem;  /* Counts available I/O buffers when throttled */
 extern sem_t g_qentry_sem;    /* Counts free I/O buffer queue containers */
 #endif
 
+extern spinlock_t g_iob_lock;
+
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
diff --git a/mm/iob/iob_add_queue.c b/mm/iob/iob_add_queue.c
index 4a5ffea2d0..2593d2a14f 100644
--- a/mm/iob/iob_add_queue.c
+++ b/mm/iob/iob_add_queue.c
@@ -61,7 +61,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
 
   qentry->qe_flink = NULL;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   if (!iobq->qh_head)
     {
       iobq->qh_head = qentry;
@@ -74,7 +74,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
       iobq->qh_tail = qentry;
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return 0;
 }
diff --git a/mm/iob/iob_alloc.c b/mm/iob/iob_alloc.c
index 3cb7dff71c..5edb369e34 100644
--- a/mm/iob/iob_alloc.c
+++ b/mm/iob/iob_alloc.c
@@ -73,7 +73,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -92,7 +92,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0;    /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iob;
 }
 
@@ -272,7 +272,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
 #if CONFIG_IOB_THROTTLE > 0
   /* If there are free I/O buffers for this allocation */
@@ -314,7 +314,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
           g_throttle_sem.semcount--;
 #endif
 
-          leave_critical_section(flags);
+          spin_unlock_irqrestore(&g_iob_lock, flags);
 
           /* Put the I/O buffer in a known state */
 
@@ -326,6 +326,6 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return NULL;
 }
diff --git a/mm/iob/iob_alloc_qentry.c b/mm/iob/iob_alloc_qentry.c
index 567b900534..0f00019ab9 100644
--- a/mm/iob/iob_alloc_qentry.c
+++ b/mm/iob/iob_alloc_qentry.c
@@ -57,7 +57,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
 
@@ -73,7 +73,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
@@ -199,7 +199,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list:  We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
   iobq  = g_iob_freeqlist;
   if (iobq)
     {
@@ -225,7 +225,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return iobq;
 }
 
diff --git a/mm/iob/iob_free.c b/mm/iob/iob_free.c
index 232e28ff4e..f2c04be917 100644
--- a/mm/iob/iob_free.c
+++ b/mm/iob/iob_free.c
@@ -118,7 +118,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -137,6 +137,8 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_freelist  = iob;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an IOB is available.  If there is a thread blocked,
    * waiting for an IOB, this will wake up exactly one thread.  The
    * semaphore count will correctly indicated that the awakened task
@@ -168,8 +170,6 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
     }
 #endif
 
-  leave_critical_section(flags);
-
   /* And return the I/O buffer after the one that was freed */
 
   return next;
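
Note the reordering in iob_free(): the list update still happens under
the lock, but the unlock now comes before nxsem_post(). A spinlock,
unlike the global critical section, must not be held across a call
that can wake another thread -- a woken waiter may immediately try to
take g_iob_lock itself. The resulting shape, as a sketch (g_iob_sem is
the real counting semaphore declared in iob.h; the list handling is
elided):

#include <nuttx/semaphore.h>
#include <nuttx/spinlock.h>

extern spinlock_t g_iob_lock;
extern sem_t g_iob_sem;        /* Counts available I/O buffers */

static void iob_release_sketch(void)
{
  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);

  /* ... return the IOB to the free or committed list ... */

  spin_unlock_irqrestore(&g_iob_lock, flags);  /* Drop the lock first */

  nxsem_post(&g_iob_sem);  /* Then signal; this may switch context */
}
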
diff --git a/mm/iob/iob_free_qentry.c b/mm/iob/iob_free_qentry.c
index 527861afd7..5709ec73b0 100644
--- a/mm/iob/iob_free_qentry.c
+++ b/mm/iob/iob_free_qentry.c
@@ -58,7 +58,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list?  If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -77,6 +77,8 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       g_iob_freeqlist  = iobq;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an I/O buffer chain container is available.  If there
    * is a thread waiting for an I/O buffer chain container, this will
    * wake up exactly one thread.  The semaphore count will correctly
@@ -85,7 +87,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    */
 
   nxsem_post(&g_qentry_sem);
-  leave_critical_section(flags);
 
   /* And return the I/O buffer chain container after the one that was freed */
 
diff --git a/mm/iob/iob_free_queue_qentry.c b/mm/iob/iob_free_queue_qentry.c
index 3801efe9c1..88d34ffbf9 100644
--- a/mm/iob/iob_free_queue_qentry.c
+++ b/mm/iob/iob_free_queue_qentry.c
@@ -51,7 +51,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
   FAR struct iob_qentry_s *prev = NULL;
   FAR struct iob_qentry_s *qentry;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   for (qentry = iobq->qh_head; qentry != NULL;
        prev = qentry, qentry = qentry->qe_flink)
     {
@@ -73,6 +73,8 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
               iobq->qh_tail = prev;
             }
 
+          spin_unlock_irqrestore(&g_iob_lock, flags);
+
           /* Remove the queue container */
 
           iob_free_qentry(qentry);
@@ -81,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
 
           iob_free_chain(iob);
 
-          break;
+          return;
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 }
 
 #endif /* CONFIG_IOB_NCHAINS > 0 */
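
The break-to-return change above matters: once the entry is unlinked,
the function releases g_iob_lock and calls iob_free_qentry() and
iob_free_chain(), which take the lock themselves; breaking out of the
loop instead would fall through to the final spin_unlock_irqrestore()
and release the lock a second time. The generic shape, with
hypothetical names:

#include <nuttx/spinlock.h>

struct node_s
{
  FAR struct node_s *flink;
  int key;
};

static spinlock_t g_list_lock = SP_UNLOCKED;
static FAR struct node_s *g_list_head;

/* Frees the node; may acquire g_list_lock internally */

extern void node_dispose(FAR struct node_s *node);

static void list_remove(int key)
{
  FAR struct node_s *prev = NULL;
  FAR struct node_s *node;
  irqstate_t flags = spin_lock_irqsave(&g_list_lock);

  for (node = g_list_head; node != NULL; prev = node, node = node->flink)
    {
      if (node->key == key)
        {
          if (prev != NULL)
            {
              prev->flink = node->flink;
            }
          else
            {
              g_list_head = node->flink;
            }

          spin_unlock_irqrestore(&g_list_lock, flags);
          node_dispose(node);
          return;  /* Not break: the unlock below must not run again */
        }
    }

  spin_unlock_irqrestore(&g_list_lock, flags);
}
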
diff --git a/mm/iob/iob_initialize.c b/mm/iob/iob_initialize.c
index 08b30e5165..d232f1e7bf 100644
--- a/mm/iob/iob_initialize.c
+++ b/mm/iob/iob_initialize.c
@@ -102,6 +102,8 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
 sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
 #endif
 
+spinlock_t g_iob_lock = SP_UNLOCKED;
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
diff --git a/mm/iob/iob_remove_queue.c b/mm/iob/iob_remove_queue.c
index 04953867c1..8ccbabcdea 100644
--- a/mm/iob/iob_remove_queue.c
+++ b/mm/iob/iob_remove_queue.c
@@ -56,7 +56,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
 
   /* Remove the I/O buffer chain from the head of the queue */
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   qentry = iobq->qh_head;
   if (qentry)
     {
@@ -66,6 +66,8 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
           iobq->qh_tail = NULL;
         }
 
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+
       /* Extract the I/O buffer chain from the container and free the
        * container.
        */
@@ -73,8 +75,11 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
       iob = qentry->qe_head;
       iob_free_qentry(qentry);
     }
+  else
+    {
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+    }
 
-  leave_critical_section(flags);
   return iob;
 }
 


(nuttx) 04/04: mm: Change global spinlock to per heap

commit 0c805ca0a907fad87074276a693d0b1f7a5332ad
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Mon Oct 30 01:01:37 2023 +0800

    mm: Change global spinlock to per heap
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
---
 mm/mm_heap/mm.h        |  1 +
 mm/mm_heap/mm_free.c   |  4 ++--
 mm/mm_heap/mm_malloc.c |  4 ++--
 mm/tlsf/mm_tlsf.c      | 13 +++++--------
 4 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index f7c5549939..163aa46818 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -250,6 +250,7 @@ struct mm_heap_s
    * immdiately.
    */
 
+  spinlock_t mm_spinlock;
   FAR struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
 
   /* The is a multiple mempool of the heap */
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index 7716c720ce..b41464da9f 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -45,12 +45,12 @@ static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
 
   /* Delay the deallocation until a more appropriate time. */
 
-  flags = spin_lock_irqsave(NULL);
+  flags = spin_lock_irqsave(&heap->mm_spinlock);
 
   tmp->flink = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = tmp;
 
-  spin_unlock_irqrestore(NULL, flags);
+  spin_unlock_irqrestore(&heap->mm_spinlock, flags);
 #endif
 }
 
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index c8c12a974d..df2624aba0 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -47,12 +47,12 @@ static void free_delaylist(FAR struct mm_heap_s *heap)
 
   /* Move the delay list to local */
 
-  flags = spin_lock_irqsave(NULL);
+  flags = spin_lock_irqsave(&heap->mm_spinlock);
 
   tmp = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = NULL;
 
-  spin_unlock_irqrestore(NULL, flags);
+  spin_unlock_irqrestore(&heap->mm_spinlock, flags);
 
   /* Test if the delayed is empty */
 
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 5d221a8e3d..0f93c2229f 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -97,11 +97,8 @@ struct mm_heap_s
 
   /* Free delay list, for some situation can't do free immdiately */
 
-#ifdef CONFIG_SMP
+  spinlock_t mm_spinlock;
   struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
-#else
-  struct mm_delaynode_s *mm_delaylist[1];
-#endif
 
 #if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
   struct procfs_meminfo_entry_s mm_procfs;
@@ -173,12 +170,12 @@ static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
 
   /* Delay the deallocation until a more appropriate time. */
 
-  flags = spin_lock_irqsave(NULL);
+  flags = spin_lock_irqsave(&heap->mm_spinlock);
 
   tmp->flink = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = tmp;
 
-  spin_unlock_irqrestore(NULL, flags);
+  spin_unlock_irqrestore(&heap->mm_spinlock, flags);
 #endif
 }
 
@@ -194,12 +191,12 @@ static void free_delaylist(FAR struct mm_heap_s *heap)
 
   /* Move the delay list to local */
 
-  flags = spin_lock_irqsave(NULL);
+  flags = spin_lock_irqsave(&heap->mm_spinlock);
 
   tmp = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = NULL;
 
-  spin_unlock_irqrestore(NULL, flags);
+  spin_unlock_irqrestore(&heap->mm_spinlock, flags);
 
   /* Test if the delayed is empty */
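
With the lock moved into struct mm_heap_s, two heaps no longer contend
with each other: each allocator instance serializes only its own delay
lists. The tlsf hunk also drops the #ifdef CONFIG_SMP sizing of
mm_delaylist, leaving it at CONFIG_SMP_NCPUS unconditionally
(presumably 1 in non-SMP builds). One thing the diff does not show is
the initialization of the new field; presumably the heap setup path
assigns SP_UNLOCKED, along the lines of this sketch with hypothetical
names:

#include <nuttx/spinlock.h>

struct my_heap_s                 /* Stand-in for struct mm_heap_s */
{
  spinlock_t lock;               /* One lock per heap instance */

  /* ... free lists, per-CPU delay lists, ... */
};

static void my_heap_init(FAR struct my_heap_s *heap)
{
  heap->lock = SP_UNLOCKED;
}

static void my_heap_op(FAR struct my_heap_s *heap)
{
  irqstate_t flags = spin_lock_irqsave(&heap->lock);

  /* ... touch only this heap's state; other heaps run in parallel ... */

  spin_unlock_irqrestore(&heap->lock, flags);
}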
 


(nuttx) 01/04: mm/tlsf: Replace the critical section with spin lock

commit 08bae1362453874d457f03038cfa759d60ad72f7
Author: Xiang Xiao <xi...@xiaomi.com>
AuthorDate: Sat Oct 21 17:18:57 2023 +0800

    mm/tlsf: Replace the critical section with spin lock
    
    Based on discussion: https://github.com/apache/nuttx/issues/10981
    
    Signed-off-by: Xiang Xiao <xi...@xiaomi.com>
---
 mm/tlsf/mm_tlsf.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 1333d4bcc8..5d221a8e3d 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -173,12 +173,12 @@ static void add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
 
   /* Delay the deallocation until a more appropriate time. */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(NULL);
 
   tmp->flink = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = tmp;
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(NULL, flags);
 #endif
 }
 
@@ -194,12 +194,12 @@ static void free_delaylist(FAR struct mm_heap_s *heap)
 
   /* Move the delay list to local */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(NULL);
 
   tmp = heap->mm_delaylist[up_cpu_index()];
   heap->mm_delaylist[up_cpu_index()] = NULL;
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(NULL, flags);
 
   /* Test if the delayed is empty */
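
Although it appears last on this page, this is the first commit of the
series. Passing NULL to spin_lock_irqsave() makes NuttX fall back to a
single shared spinlock, so at this stage every heap still serializes
against every other; commit 04/04 above then swaps the NULL for the
per-heap &heap->mm_spinlock. The two stages side by side, as a sketch
(assuming the in-tree mm.h definition of struct mm_heap_s is visible):

#include <nuttx/spinlock.h>

static void delaylist_lock_stages(FAR struct mm_heap_s *heap)
{
  irqstate_t flags;

  /* 01/04: shared fallback lock -- all heaps contend here */

  flags = spin_lock_irqsave(NULL);
  spin_unlock_irqrestore(NULL, flags);

  /* 04/04: per-heap lock -- contention scoped to one allocator */

  flags = spin_lock_irqsave(&heap->mm_spinlock);
  spin_unlock_irqrestore(&heap->mm_spinlock, flags);
}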