Posted to commits@nuttx.apache.org by je...@apache.org on 2020/12/10 07:34:06 UTC

[incubator-nuttx] 01/04: sched: irq: Change irq_waitlock() from private to public

This is an automated email from the ASF dual-hosted git repository.

jerpelea pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit a24905059e8d3dcf9e5cad2d4251af9177de8606
Author: Masayuki Ishikawa <ma...@gmail.com>
AuthorDate: Wed Nov 25 09:53:03 2020 +0900

    sched: irq: Change irq_waitlock() from private to public
    
    Signed-off-by: Masayuki Ishikawa <Ma...@jp.sony.com>
---
 include/nuttx/irq.h      | 50 ++++++++++++++++++++++++++++++++++++++++++++++++
 sched/irq/irq_csection.c |  2 +-
 2 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 07474bc..0a6c094 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -46,6 +46,9 @@
 #ifndef __ASSEMBLY__
 # include <stdint.h>
 # include <assert.h>
+# ifdef CONFIG_SMP
+#  include <stdbool.h>
+# endif
 #endif
 
 /* Now include architecture-specific types */
@@ -171,6 +174,53 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
 #endif
 
 /****************************************************************************
+ * Name: irq_waitlock
+ *
+ * Description:
+ *   Spin to get g_cpu_irqlock, handling a known deadlock condition:
+ *
+ *   A deadlock may occur if enter_critical_section is called from an
+ *   interrupt handler.  Suppose:
+ *
+ *   - CPUn is in a critical section and has the g_cpu_irqlock spinlock.
+ *   - CPUm takes an interrupt and attempts to enter the critical section.
+ *   - It spins waiting on g_cpu_irqlock with interrupts disabled.
+ *   - CPUn calls up_cpu_pause() to pause operation on CPUm.  This will
+ *     issue an inter-CPU interrupt to CPUm.
+ *   - But interrupts are disabled on CPUm, so the pause interrupt is
+ *     never handled, causing the deadlock.
+ *
+ *   This same deadlock can occur in the normal tasking case:
+ *
+ *   - A task on CPUn enters a critical section and has the g_cpu_irqlock
+ *     spinlock.
+ *   - Another task on CPUm attempts to enter the critical section but has
+ *     to wait, spinning to get g_cpu_irqlock with interrupts disabled.
+ *   - The task on CPUn causes a new task to become ready-to-run and the
+ *     scheduler selects CPUm.  CPUm is requested to pause via a pause
+ *     interrupt.
+ *   - But the task on CPUm is also attempting to enter the critical
+ *     section.  Since it is spinning with interrupts disabled, CPUm cannot
+ *     process the pending pause interrupt, causing the deadlock.
+ *
+ *   This function detects this deadlock condition while spinning with
+ *   interrupts disabled.
+ *
+ * Input Parameters:
+ *   cpu - Index of the CPU that is trying to enter the critical section.
+ *
+ * Returned Value:
+ *   True:  The g_cpu_irqlock spinlock has been taken.
+ *   False: The g_cpu_irqlock spinlock has not been taken yet, but there is
+ *          a pending pause interrupt request.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+bool irq_waitlock(int cpu);
+#endif
+
+/****************************************************************************
  * Name: enter_critical_section
  *
  * Description:
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index c26e6d6..34565da 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -105,7 +105,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-static inline bool irq_waitlock(int cpu)
+bool irq_waitlock(int cpu)
 {
 #ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
   FAR struct tcb_s *tcb = current_task(cpu);
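
For context, with irq_waitlock() now prototyped in include/nuttx/irq.h,
code outside sched/irq/irq_csection.c can spin for g_cpu_irqlock while
still detecting a pending pause request.  The sketch below is
illustrative only and is not part of this patch; the helper names
up_cpu_index(), up_cpu_paused(), up_irq_save(), and up_irq_restore() are
assumed to be the usual NuttX SMP and interrupt-control primitives, and
example_take_irqlock() is a hypothetical caller.

    #include <nuttx/config.h>
    #include <nuttx/irq.h>
    #include <nuttx/arch.h>

    #ifdef CONFIG_SMP
    static void example_take_irqlock(void)
    {
      /* The deadlock described in the header comment arises while
       * spinning with interrupts disabled, so disable them first.
       */

      irqstate_t flags = up_irq_save();
      int cpu = up_cpu_index();     /* Index of the executing CPU */

      while (!irq_waitlock(cpu))
        {
          /* False return: the lock was NOT taken because a pause
           * interrupt is pending for this CPU.  Service the pause
           * request to break the potential deadlock, then try for the
           * lock again.
           */

          up_cpu_paused(cpu);
        }

      /* g_cpu_irqlock is now held by this CPU.  Releasing it is outside
       * the scope of this sketch (the real release path is internal to
       * the critical-section code in sched/irq).
       */

      up_irq_restore(flags);
    }
    #endif /* CONFIG_SMP */

How an actual caller responds to a false return is up to that caller;
the retry loop above is just one plausible pattern.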