Posted to commits@nuttx.apache.org by ma...@apache.org on 2020/10/30 10:06:29 UTC

[incubator-nuttx] 02/02: sched: Fix sched_lock() logic for SMP

This is an automated email from the ASF dual-hosted git repository.

masayuki pushed a commit to branch releases/10.0
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit a881495f7489995591d0c1f86c31de3799e4643d
Author: Masayuki Ishikawa <ma...@gmail.com>
AuthorDate: Thu Oct 15 10:03:25 2020 +0900

    sched: Fix sched_lock() logic for SMP
    
    Summary:
    - I noticed that the sched_lock() logic is different from sched_unlock()
    - I think sched_lock() should use a critical section
    - Also, the code should be as simple as sched_unlock() (see the
      sketch below)
    - This commit fixes these issues
    
    Impact:
    - Affects SMP only
    
    Testing:
    - Tested with spresense:wifi_smp (both NCPUS=2 and 3)
    - Tested with lc823450-xgevk:rndis
    - Tested with maix-bit:smp (QEMU)
    - Tested with esp32-core:smp (QEMU)
    - Tested with sabre-6quad:smp (QEMU)
    
    Signed-off-by: Masayuki Ishikawa <Ma...@jp.sony.com>
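
    For comparison, sched_unlock() already has the shape that this commit
    gives to sched_lock(): perform the cheap checks first, then do the
    state update inside a critical section. On SMP, up_irq_save() only
    masks interrupts on the local CPU, while enter_critical_section()
    also takes the global IRQ spinlock, so the update is protected
    against the other CPUs as well. A simplified sketch of that shape
    (illustrative only, not the verbatim NuttX source; the g_cpu_lockset
    bookkeeping, round-robin timeslice handling, and instrumentation
    hooks are elided):

    int sched_unlock(void)
    {
      FAR struct tcb_s *rtcb = this_task();

      /* sched_unlock() has no effect during early boot (rtcb == NULL) or
       * when called from the interrupt level.
       */

      if (rtcb != NULL && !up_interrupt_context())
        {
          irqstate_t flags = enter_critical_section();

          DEBUGASSERT(rtcb->lockcount > 0);

          if (--rtcb->lockcount <= 0)
            {
              rtcb->lockcount = 0;

              /* ... clear this CPU's bit in g_cpu_lockset and allow any
               * pending context switch to take place ...
               */
            }

          leave_critical_section(flags);
        }

      return OK;
    }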
---
 sched/sched/sched_lock.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 33b3f71..2b678ee 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -133,29 +133,20 @@ volatile cpu_set_t g_cpu_lockset SP_SECTION;
 int sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
-  irqstate_t flags;
-  int cpu;
 
   /* If the CPU supports suppression of interprocessor interrupts, then
    * simply disabling interrupts will provide sufficient protection for
    * the following operation.
    */
 
-  flags = up_irq_save();
-
-  cpu  = this_cpu();
-  rtcb = current_task(cpu);
+  rtcb = this_task();
 
   /* Check for some special cases:  (1) rtcb may be NULL only during early
    * boot-up phases, and (2) sched_lock() should have no effect if called
    * from the interrupt level.
    */
 
-  if (rtcb == NULL || up_interrupt_context())
-    {
-      up_irq_restore(flags);
-    }
-  else
+  if (rtcb != NULL && !up_interrupt_context())
     {
       /* Catch attempts to increment the lockcount beyond the range of the
        * integer type.
@@ -163,6 +154,8 @@ int sched_lock(void)
 
       DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
 
+      irqstate_t flags = enter_critical_section();
+
       /* We must hold the lock on this CPU before we increment the lockcount
        * for the first time. Holding the lock is sufficient to lockout
        * context switching.
@@ -196,8 +189,6 @@ int sched_lock(void)
 
       rtcb->lockcount++;
 
-      up_irq_restore(flags);
-
 #if defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION) || \
     defined(CONFIG_SCHED_CRITMONITOR)
       /* Check if we just acquired the lock */
@@ -223,6 +214,8 @@ int sched_lock(void)
       nxsched_merge_prioritized((FAR dq_queue_t *)&g_readytorun,
                                 (FAR dq_queue_t *)&g_pendingtasks,
                                 TSTATE_TASK_PENDING);
+
+      leave_critical_section(flags);
     }
 
   return OK;
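
For callers, the observable contract of the pair is unchanged by this
commit: sched_lock() disables preemption of the calling thread (calls
nest via rtcb->lockcount) and sched_unlock() re-enables it once the
count returns to zero. A hypothetical usage sketch follows;
update_shared_state() is an invented placeholder, not a NuttX API:

    /* Prevent this thread from being preempted while it manipulates
     * state shared with other threads.  Note that sched_lock() does
     * not disable interrupts, and on SMP the threads already running
     * on other CPUs continue to run.
     */

    sched_lock();

    update_shared_state();  /* hypothetical helper for illustration */

    sched_unlock();         /* re-enable preemption; calls may nest */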