You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by gn...@apache.org on 2020/03/11 17:10:14 UTC
[incubator-nuttx] branch master updated: sched/sched: Appease
nxstyle errors (#539)
This is an automated email from the ASF dual-hosted git repository.
gnutt pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git
The following commit(s) were added to refs/heads/master by this push:
new e9a9485 sched/sched: Appease nxstyle errors (#539)
e9a9485 is described below
commit e9a94859bc355e706f50946a51232655d1f78671
Author: YAMAMOTO Takashi <ya...@midokura.com>
AuthorDate: Thu Mar 12 02:10:08 2020 +0900
sched/sched: Appease nxstyle errors (#539)
The remaining errors ("Missing space before closing C comment")
don't make sense. Maybe a bug in nxstyle.
---
sched/sched/sched_addreadytorun.c | 7 ++--
sched/sched/sched_cpuload.c | 3 +-
sched/sched/sched_cpuselect.c | 3 +-
sched/sched/sched_lock.c | 20 +++++-----
sched/sched/sched_note.c | 17 +++++---
sched/sched/sched_removereadytorun.c | 9 +++--
sched/sched/sched_rrgetinterval.c | 3 +-
sched/sched/sched_setaffinity.c | 11 +++---
sched/sched/sched_setpriority.c | 5 ++-
sched/sched/sched_sporadic.c | 3 +-
sched/sched/sched_thistask.c | 6 +--
sched/sched/sched_unlock.c | 16 ++++----
sched/sched/sched_wait.c | 3 +-
sched/sched/sched_waitid.c | 18 +++++----
sched/sched/sched_waitpid.c | 77 +++++++++++++++++++-----------------
15 files changed, 111 insertions(+), 90 deletions(-)
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index af29b96..c5e7c9a 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -201,7 +201,8 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
/* Determine the desired new task state. First, if the new task priority
* is higher then the priority of the lowest priority, running task, then
- * the new task will be running and a context switch switch will be required.
+ * the new task will be running and a context switch switch will be
+ * required.
*/
if (rtcb->sched_priority < btcb->sched_priority)
@@ -413,8 +414,8 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
* REVISIT: I have seen this assertion fire. Apparently another
* CPU may add another, higher priority task to the same
* g_assignedtasks[] list sometime after sched_cpu_select() was
- * called above, leaving this TCB in the wrong task list if task_state
- * is TSTATE_TASK_ASSIGNED).
+ * called above, leaving this TCB in the wrong task list if
+ * task_state is TSTATE_TASK_ASSIGNED).
*/
DEBUGASSERT(task_state == TSTATE_TASK_ASSIGNED);
diff --git a/sched/sched/sched_cpuload.c b/sched/sched/sched_cpuload.c
index dfc356b..c82944c 100644
--- a/sched/sched/sched_cpuload.c
+++ b/sched/sched/sched_cpuload.c
@@ -229,7 +229,8 @@ void weak_function nxsched_process_cpuload(void)
* Return load measurement data for the select PID.
*
* Input Parameters:
- * pid - The task ID of the thread of interest. pid == 0 is the IDLE thread.
+ * pid - The task ID of the thread of interest. pid == 0 is the IDLE
+ * thread.
* cpuload - The location to return the CPU load
*
* Returned Value:
diff --git a/sched/sched/sched_cpuselect.c b/sched/sched/sched_cpuselect.c
index e0d28f8..09d457c 100644
--- a/sched/sched/sched_cpuselect.c
+++ b/sched/sched/sched_cpuselect.c
@@ -95,7 +95,8 @@ int sched_cpu_select(cpu_set_t affinity)
if ((affinity & (1 << i)) != 0)
{
- FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_assignedtasks[i].head;
+ FAR struct tcb_s *rtcb = (FAR struct tcb_s *)
+ g_assignedtasks[i].head;
/* If this thread is executing its IDLE task, the use it. The
* IDLE task is always the last task in the assigned task list.
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 1f7f031..d77d6d3 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -86,9 +86,9 @@
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
- * list transitions has 'lockcount' > 0. This might happen when sched_lock()
- * is called, or after a context switch that changes the TCB at the
- * head of the g_assignedtasks[cpu] list.
+ * list transitions has 'lockcount' > 0. This might happen when
+ * sched_lock() is called, or after a context switch that changes the
+ * TCB at the head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
@@ -160,14 +160,14 @@ int sched_lock(void)
#endif
int cpu;
- /* The following operation is non-atomic unless CONFIG_ARCH_GLOBAL_IRQDISABLE
- * or CONFIG_ARCH_HAVE_FETCHADD is defined.
+ /* The following operation is non-atomic unless
+ * CONFIG_ARCH_GLOBAL_IRQDISABLE or CONFIG_ARCH_HAVE_FETCHADD is defined.
*/
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
- /* If the CPU supports suppression of interprocessor interrupts, then simple
- * disabling interrupts will provide sufficient protection for the following
- * operation.
+ /* If the CPU supports suppression of interprocessor interrupts, then
+ * simple disabling interrupts will provide sufficient protection for
+ * the following operation.
*/
flags = up_irq_save();
@@ -211,8 +211,8 @@ int sched_lock(void)
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
/* We must hold the lock on this CPU before we increment the lockcount
- * for the first time. Holding the lock is sufficient to lockout context
- * switching.
+ * for the first time. Holding the lock is sufficient to lockout
+ * context switching.
*/
if (rtcb->lockcount == 0)
diff --git a/sched/sched/sched_note.c b/sched/sched/sched_note.c
index ff7cb35..28ca43d 100644
--- a/sched/sched/sched_note.c
+++ b/sched/sched/sched_note.c
@@ -145,7 +145,8 @@ static inline unsigned int note_next(unsigned int ndx, unsigned int offset)
*
****************************************************************************/
-static void note_common(FAR struct tcb_s *tcb, FAR struct note_common_s *note,
+static void note_common(FAR struct tcb_s *tcb,
+ FAR struct note_common_s *note,
uint8_t length, uint8_t type)
{
uint32_t systime = (uint32_t)clock_systimer();
@@ -163,7 +164,7 @@ static void note_common(FAR struct tcb_s *tcb, FAR struct note_common_s *note,
/* Save the LS 32-bits of the system timer in little endian order */
- note->nc_systime[0] = (uint8_t)( systime & 0xff);
+ note->nc_systime[0] = (uint8_t)(systime & 0xff);
note->nc_systime[1] = (uint8_t)((systime >> 8) & 0xff);
note->nc_systime[2] = (uint8_t)((systime >> 16) & 0xff);
note->nc_systime[3] = (uint8_t)((systime >> 24) & 0xff);
@@ -185,7 +186,8 @@ static void note_common(FAR struct tcb_s *tcb, FAR struct note_common_s *note,
****************************************************************************/
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
-void note_spincommon(FAR struct tcb_s *tcb, FAR volatile spinlock_t *spinlock,
+void note_spincommon(FAR struct tcb_s *tcb,
+ FAR volatile spinlock_t *spinlock,
int type)
{
struct note_spinlock_s note;
@@ -419,7 +421,8 @@ void sched_note_suspend(FAR struct tcb_s *tcb)
/* Format the note */
- note_common(tcb, &note.nsu_cmn, sizeof(struct note_suspend_s), NOTE_SUSPEND);
+ note_common(tcb, &note.nsu_cmn, sizeof(struct note_suspend_s),
+ NOTE_SUSPEND);
note.nsu_state = tcb->task_state;
/* Add the note to circular buffer */
@@ -573,12 +576,14 @@ void sched_note_spinlock(FAR struct tcb_s *tcb, FAR volatile void *spinlock)
note_spincommon(tcb, spinlock, NOTE_SPINLOCK_LOCK);
}
-void sched_note_spinlocked(FAR struct tcb_s *tcb, FAR volatile void *spinlock)
+void sched_note_spinlocked(FAR struct tcb_s *tcb,
+ FAR volatile void *spinlock)
{
note_spincommon(tcb, spinlock, NOTE_SPINLOCK_LOCKED);
}
-void sched_note_spinunlock(FAR struct tcb_s *tcb, FAR volatile void *spinlock)
+void sched_note_spinunlock(FAR struct tcb_s *tcb,
+ FAR volatile void *spinlock)
{
note_spincommon(tcb, spinlock, NOTE_SPINLOCK_UNLOCK);
}
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index aa59427..fa842be 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -1,7 +1,8 @@
/****************************************************************************
* sched/sched_removereadytorun.c
*
- * Copyright (C) 2007-2009, 2012, 2016-2017 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007-2009, 2012, 2016-2017 Gregory Nutt.
+ * All rights reserved.
* Author: Gregory Nutt <gn...@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -223,9 +224,9 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
/* Did we find a task in the g_readytorun list? Which task should
* we use? We decide strictly by the priority of the two tasks:
- * Either (1) the task currently at the head of the g_assignedtasks[cpu]
- * list (nexttcb) or (2) the highest priority task from the
- * g_readytorun list with matching affinity (rtrtcb).
+ * Either (1) the task currently at the head of the
+ * g_assignedtasks[cpu] list (nexttcb) or (2) the highest priority
+ * task from the g_readytorun list with matching affinity (rtrtcb).
*/
if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
diff --git a/sched/sched/sched_rrgetinterval.c b/sched/sched/sched_rrgetinterval.c
index 1f44af2..89d67d7 100644
--- a/sched/sched/sched_rrgetinterval.c
+++ b/sched/sched/sched_rrgetinterval.c
@@ -129,7 +129,8 @@ int sched_rr_get_interval(pid_t pid, struct timespec *interval)
/* Convert the timeslice value from ticks to a timespec */
interval->tv_sec = CONFIG_RR_INTERVAL / MSEC_PER_SEC;
- interval->tv_nsec = (CONFIG_RR_INTERVAL % MSEC_PER_SEC) * NSEC_PER_MSEC;
+ interval->tv_nsec = (CONFIG_RR_INTERVAL % MSEC_PER_SEC) *
+ NSEC_PER_MSEC;
}
else
#endif
diff --git a/sched/sched/sched_setaffinity.c b/sched/sched/sched_setaffinity.c
index d9ba779..0926bfb 100644
--- a/sched/sched/sched_setaffinity.c
+++ b/sched/sched/sched_setaffinity.c
@@ -145,10 +145,10 @@ int nxsched_setaffinity(pid_t pid, size_t cpusetsize,
/* No.. then we will need to move the task from the assigned
* task list to some other ready to run list.
*
- * nxsched_setpriority() will do just what we want... it will remove
- * the task from its current position in the some assigned task list
- * and then simply put it back in the right place. This works even
- * if the task is this task.
+ * nxsched_setpriority() will do just what we want... it will
+ * remove the task from its current position in the some assigned
+ * task list and then simply put it back in the right place. This
+ * works even if the task is this task.
*/
ret = nxsched_setpriority(tcb, tcb->sched_priority);
@@ -193,7 +193,8 @@ errout_with_lock:
*
****************************************************************************/
-int sched_setaffinity(pid_t pid, size_t cpusetsize, FAR const cpu_set_t *mask)
+int sched_setaffinity(pid_t pid, size_t cpusetsize,
+ FAR const cpu_set_t *mask)
{
int ret = nxsched_setaffinity(pid, cpusetsize, mask);
if (ret < 0)
diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index 92d1d1a..4525778 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -197,8 +197,9 @@ static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb,
int cpu;
/* CASE 2a. The task is ready-to-run (but not running) but not assigned to
- * a CPU. An increase in priority could cause a context switch may be caused
- * by the re-prioritization. The task is not assigned and may run on any CPU.
+ * a CPU. An increase in priority could cause a context switch may be
+ * caused by the re-prioritization. The task is not assigned and may run
+ * on any CPU.
*/
if (tcb->task_state == TSTATE_TASK_READYTORUN)
diff --git a/sched/sched/sched_sporadic.c b/sched/sched/sched_sporadic.c
index ef65c7d..81425c4 100644
--- a/sched/sched/sched_sporadic.c
+++ b/sched/sched/sched_sporadic.c
@@ -530,7 +530,8 @@ static void sporadic_budget_expire(int argc, wdparm_t arg1, ...)
* replenishment.
*/
- DEBUGVERIFY(sporadic_replenish_delay(repl, period, unrealized));
+ DEBUGVERIFY(sporadic_replenish_delay(repl, period,
+ unrealized));
}
}
}
diff --git a/sched/sched/sched_thistask.c b/sched/sched/sched_thistask.c
index 1b75f54..d645ac4 100644
--- a/sched/sched/sched_thistask.c
+++ b/sched/sched/sched_thistask.c
@@ -73,9 +73,9 @@ FAR struct tcb_s *this_task(void)
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
irqstate_t flags;
- /* If the CPU supports suppression of interprocessor interrupts, then simple
- * disabling interrupts will provide sufficient protection for the following
- * operations.
+ /* If the CPU supports suppression of interprocessor interrupts, then
+ * simple disabling interrupts will provide sufficient protection for
+ * the following operations.
*/
flags = up_irq_save();
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index 61ad181..c454f5e 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -170,8 +170,8 @@ int sched_unlock(void)
{
/* Yes.. that is the situation. But one more thing. The call
* to up_release_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that case,
- * we need only to reset the timeslice value back to the
+ * the task at the head of the ready-to-run list. In that
+ * case, we need only to reset the timeslice value back to the
* maximum.
*/
@@ -199,8 +199,8 @@ int sched_unlock(void)
* for the next time slice.
*/
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC &&
- rtcb->timeslice < 0)
+ if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+ && rtcb->timeslice < 0)
{
/* Yes.. that is the situation. Force the low-priority state
* now
@@ -301,8 +301,8 @@ int sched_unlock(void)
{
/* Yes.. that is the situation. But one more thing. The call
* to up_release_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that case,
- * we need only to reset the timeslice value back to the
+ * the task at the head of the ready-to-run list. In that
+ * case, we need only to reset the timeslice value back to the
* maximum.
*/
@@ -330,8 +330,8 @@ int sched_unlock(void)
* for the next time slice.
*/
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC &&
- rtcb->timeslice < 0)
+ if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+ && rtcb->timeslice < 0)
{
/* Yes.. that is the situation. Force the low-priority state
* now
diff --git a/sched/sched/sched_wait.c b/sched/sched/sched_wait.c
index bb5650b..7dd9070 100644
--- a/sched/sched/sched_wait.c
+++ b/sched/sched/sched_wait.c
@@ -68,7 +68,8 @@
*
* The waitpid() function will behave identically to wait(), if the pid
* argument is (pid_t)-1 and the options argument is 0. Otherwise, its
- * behaviour will be modified by the values of the pid and options arguments.
+ * behaviour will be modified by the values of the pid and options
+ * arguments.
*
* Input Parameters:
* stat_loc - The location to return the exit status
diff --git a/sched/sched/sched_waitid.c b/sched/sched/sched_waitid.c
index 5756731..b70caad 100644
--- a/sched/sched/sched_waitid.c
+++ b/sched/sched/sched_waitid.c
@@ -66,7 +66,8 @@
****************************************************************************/
#ifdef CONFIG_SCHED_CHILD_STATUS
-static void exited_child(FAR struct tcb_s *rtcb, FAR struct child_status_s *child,
+static void exited_child(FAR struct tcb_s *rtcb,
+ FAR struct child_status_s *child,
FAR siginfo_t *info)
{
/* The child has exited. Return the saved exit status (and some fudged
@@ -132,9 +133,9 @@ static void exited_child(FAR struct tcb_s *rtcb, FAR struct child_status_s *chil
*
* The 'info' argument must point to a siginfo_t structure. If waitid()
* returns because a child process was found that satisfied the conditions
- * indicated by the arguments idtype and options, then the structure pointed
- * to by 'info' will be filled in by the system with the status of the
- * process. The si_signo member will always be equal to SIGCHLD.
+ * indicated by the arguments idtype and options, then the structure
+ * pointed to by 'info' will be filled in by the system with the status of
+ * the process. The si_signo member will always be equal to SIGCHLD.
*
* Input Parameters:
* See description.
@@ -166,10 +167,11 @@ int waitid(idtype_t idtype, id_t id, FAR siginfo_t *info, int options)
int errcode;
int ret;
- /* MISSING LOGIC: If WNOHANG is provided in the options, then this function
- * should returned immediately. However, there is no mechanism available now
- * know if the thread has child: The children remember their parents (if
- * CONFIG_SCHED_HAVE_PARENT) but the parents do not remember their children.
+ /* MISSING LOGIC: If WNOHANG is provided in the options, then this
+ * function should returned immediately. However, there is no mechanism
+ * available now know if the thread has child: The children remember
+ * their parents (if CONFIG_SCHED_HAVE_PARENT) but the parents do not
+ * remember their children.
*/
#ifdef CONFIG_DEBUG_FEATURES
diff --git a/sched/sched/sched_waitpid.c b/sched/sched/sched_waitpid.c
index 86e3b35..644249d 100644
--- a/sched/sched/sched_waitpid.c
+++ b/sched/sched/sched_waitpid.c
@@ -67,27 +67,27 @@
* execution of the calling thread until status information for one of the
* terminated child processes of the calling process is available, or until
* delivery of a signal whose action is either to execute a signal-catching
- * function or to terminate the process. If more than one thread is suspended
- * in waitpid() awaiting termination of the same process, exactly one thread
- * will return the process status at the time of the target process
- * termination. If status information is available prior to the call to
- * waitpid(), return will be immediate.
+ * function or to terminate the process. If more than one thread is
+ * suspended in waitpid() awaiting termination of the same process, exactly
+ * one thread will return the process status at the time of the target
+ * process termination. If status information is available prior to the
+ * call to waitpid(), return will be immediate.
*
* The pid argument specifies a set of child processes for which status is
* requested. The waitpid() function will only return the status of a child
* process from this set:
*
- * - If pid is equal to (pid_t)-1, status is requested for any child process.
- * In this respect, waitpid() is then equivalent to wait().
- * - If pid is greater than 0, it specifies the process ID of a single child
- * process for which status is requested.
+ * - If pid is equal to (pid_t)-1, status is requested for any child
+ * process. In this respect, waitpid() is then equivalent to wait().
+ * - If pid is greater than 0, it specifies the process ID of a single
+ * child process for which status is requested.
* - If pid is 0, status is requested for any child process whose process
* group ID is equal to that of the calling process.
- * - If pid is less than (pid_t)-1, status is requested for any child process
- * whose process group ID is equal to the absolute value of pid.
+ * - If pid is less than (pid_t)-1, status is requested for any child
+ * process whose process group ID is equal to the absolute value of pid.
*
- * The options argument is constructed from the bitwise-inclusive OR of zero
- * or more of the following flags, defined in the <sys/wait.h> header:
+ * The options argument is constructed from the bitwise-inclusive OR of
+ * zero or more of the following flags, defined in the <sys/wait.h> header:
*
* WCONTINUED - The waitpid() function will report the status of any
* continued child process specified by pid whose status has not been
@@ -101,21 +101,22 @@
*
* If the calling process has SA_NOCLDWAIT set or has SIGCHLD set to
* SIG_IGN, and the process has no unwaited-for children that were
- * transformed into zombie processes, the calling thread will block until all
- * of the children of the process containing the calling thread terminate, and
- * waitpid() will fail and set errno to ECHILD.
+ * transformed into zombie processes, the calling thread will block until
+ * all of the children of the process containing the calling thread
+ * terminate, and waitpid() will fail and set errno to ECHILD.
*
* If waitpid() returns because the status of a child process is available,
* these functions will return a value equal to the process ID of the child
* process. In this case, if the value of the argument stat_loc is not a
* null pointer, information will be stored in the location pointed to by
- * stat_loc. The value stored at the location pointed to by stat_loc will be
- * 0 if and only if the status returned is from a terminated child process
- * that terminated by one of the following means:
+ * stat_loc. The value stored at the location pointed to by stat_loc will
+ * be 0 if and only if the status returned is from a terminated child
+ * process that terminated by one of the following means:
*
* 1. The process returned 0 from main().
* 2. The process called _exit() or exit() with a status argument of 0.
- * 3. The process was terminated because the last thread in the process terminated.
+ * 3. The process was terminated because the last thread in the process
+ * terminated.
*
* Regardless of its value, this information may be interpreted using the
* following macros, which are defined in <sys/wait.h> and evaluate to
@@ -140,7 +141,8 @@
* this macro evaluates to the number of the signal that caused the child
* process to stop.
* WIFCONTINUED(stat_val) - Evaluates to a non-zero value if status was
- * returned for a child process that has continued from a job control stop.
+ * returned for a child process that has continued from a job control
+ * stop.
*
* Input Parameters:
* pid - The task ID of the thread to waid for
@@ -156,16 +158,19 @@
* process, -1 will be returned and errno set to EINTR.
*
* If waitpid() was invoked with WNOHANG set in options, it has at least
- * one child process specified by pid for which status is not available, and
- * status is not available for any process specified by pid, 0 is returned.
+ * one child process specified by pid for which status is not available,
+ * and status is not available for any process specified by pid, 0 is
+ * returned.
*
- * Otherwise, (pid_t)-1 will be returned, and errno set to indicate the error:
+ * Otherwise, (pid_t)-1 will be returned, and errno set to indicate the
+ * error:
*
- * ECHILD - The process specified by pid does not exist or is not a child of
- * the calling process, or the process group specified by pid does not exist
- * or does not have any member process that is a child of the calling process.
- * EINTR - The function was interrupted by a signal. The value of the location
- * pointed to by stat_loc is undefined.
+ * ECHILD - The process specified by pid does not exist or is not a child
+ * of the calling process, or the process group specified by pid
+ * does not exist or does not have any member process that is a child
+ * of the calling process.
+ * EINTR - The function was interrupted by a signal. The value of the
+ * location pointed to by stat_loc is undefined.
* EINVAL - The options argument is not valid.
*
* Assumptions:
@@ -214,10 +219,10 @@ pid_t waitpid(pid_t pid, int *stat_loc, int options)
group_addwaiter(group);
- /* "If more than one thread is suspended in waitpid() awaiting termination of
- * the same process, exactly one thread will return the process status at the
- * time of the target process termination." Hmmm.. what do we return to the
- * others?
+ /* "If more than one thread is suspended in waitpid() awaiting termination
+ * of the same process, exactly one thread will return the process status
+ * at the time of the target process termination." Hmmm.. what do we
+ * return to the others?
*/
if (group->tg_waitflags == 0)
@@ -260,9 +265,9 @@ pid_t waitpid(pid_t pid, int *stat_loc, int options)
if (ret < 0)
{
- /* Unlock pre-emption and return the ERROR (nxsem_wait has already set
- * the errno). Handle the awkward case of whether or not we need to
- * nullify the stat_loc value.
+ /* Unlock pre-emption and return the ERROR (nxsem_wait has already
+ * set the errno). Handle the awkward case of whether or not we
+ * need to nullify the stat_loc value.
*/
if (mystat)