You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by xi...@apache.org on 2021/09/16 15:53:55 UTC

[incubator-nuttx] branch master updated: sched/irq/irq_csection.c: Fix typos and correct some comments.

This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 3b2aea2  sched/irq/irq_csection.c: Fix typos and correct some comments.
3b2aea2 is described below

commit 3b2aea204c950035e1189f471b91ae817b239cc7
Author: Abdelatif Guettouche <ab...@espressif.com>
AuthorDate: Thu Sep 16 16:52:46 2021 +0200

    sched/irq/irq_csection.c: Fix typos and correct some comments.
    
    Signed-off-by: Abdelatif Guettouche <ab...@espressif.com>
---
 include/nuttx/irq.h      |  2 +-
 sched/irq/irq_csection.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 60ab8bd..ba43f42 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -162,7 +162,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
  *   instrumentation):
  *
  *     Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
- *     specific counter is increment to indicate that the thread has IRQs
+ *     specific counter is incremented to indicate that the thread has IRQs
  *     disabled and to support nested calls to enter_critical_section().
  *
  *     NOTE: Most architectures do not support disabling all CPUs from one
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 4c76221..f922fd1 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -66,7 +66,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  * Name: irq_waitlock
  *
  * Description:
- *   Spin to get g_irq_waitlock, handling a known deadlock condition:
+ *   Spin to get g_cpu_irqlock, handling a known deadlock condition:
  *
  *   A deadlock may occur if enter_critical_section is called from an
  *   interrupt handler.  Suppose:
@@ -92,7 +92,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  *     section.  Since it is spinning with interrupts disabled, CPUm cannot
  *     process the pending pause interrupt, causing the deadlock.
  *
- *   This function detects this deadlock condition while spinning with \
+ *   This function detects this deadlock condition while spinning with
  *   interrupts disabled.
  *
  * Input Parameters:
@@ -131,7 +131,7 @@ static bool irq_waitlock(int cpu)
            */
 
 #ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
-          /* Notify that we are waiting for a spinlock */
+          /* Notify that we have aborted the wait for the spinlock */
 
           sched_note_spinabort(tcb, &g_cpu_irqlock);
 #endif
@@ -161,7 +161,7 @@ static bool irq_waitlock(int cpu)
  *
  * Description:
  *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
- *   specific counter is increment to indicate that the thread has IRQs
+ *   specific counter is incremented to indicate that the thread has IRQs
  *   disabled and to support nested calls to enter_critical_section().
  *
  ****************************************************************************/
@@ -253,7 +253,7 @@ try_again:
 
           else
             {
-              /* Make sure that the g_cpu_irqlock() was not already set
+              /* Make sure that the g_cpu_irqset was not already set
                * by previous logic on this CPU that was executed by the
                * interrupt handler.  We know that the bit in g_cpu_irqset
                * for this CPU was zero on entry into the interrupt handler,
@@ -270,8 +270,8 @@ try_again_in_irq:
                   if (!irq_waitlock(cpu))
                     {
                       /* We are in a deadlock condition due to a pending
-                       * pause request interrupt request.  Break the
-                       * deadlock by handling the pause interrupt now.
+                       * pause request interrupt.  Break the deadlock by
+                       * handling the pause request now.
                        */
 
                       DEBUGVERIFY(up_cpu_paused(cpu));
@@ -360,7 +360,7 @@ try_again_in_irq:
                   goto try_again;
                 }
 
-              /* The set the lock count to 1.
+              /* Then set the lock count to 1.
                *
               * Interrupt disables must follow a stacked order.  We
               * cannot allow other context switches to re-order the enabling