You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by ma...@apache.org on 2022/06/09 22:23:48 UTC
[incubator-nuttx] branch master updated: sched/mqueue: remove sched_lock to improve performance
This is an automated email from the ASF dual-hosted git repository.
masayuki pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git
The following commit(s) were added to refs/heads/master by this push:
new f5d4571abc sched/mqueue: remove sched_lock to improve performance
f5d4571abc is described below
commit f5d4571abc002b7560d5d3ab11b0cddcd6d0ce8e
Author: chao.an <an...@xiaomi.com>
AuthorDate: Wed Jun 8 17:55:39 2022 +0800
sched/mqueue: remove sched_lock to improve performance
remove the sched_lock/unlock calls to improve performance by 18%
mq_send flow — cycle count (columns: Origin vs. Optimized)
|
->nxmq_send 24 24
|
->file_mq_send 209 209
|
|->sched_lock 243 N/A <-
|->nxmq_do_send 391 348
| |
| |->sched_lock 434 N/A <-
| |->up_unblock_task 545 459
| ->sched_unlock 675 N/A <-
|
->sched_unlock 684 N/A <-
|
->up_release_pending 701 N/A
|
->arm_switchcontext 856 610
mq_receive
|
->arm_fullcontextrestore 1375 1133
|
->up_block_task 1375 1133
|
->nxmq_wait_receive 1530 1288
|
->file_mq_receive 1606 1310
|
->nxmq_receive 1616 1320
|
->mq_receive 1628 1332 - 18%
Signed-off-by: chao.an <an...@xiaomi.com>
---
sched/mqueue/mq_notify.c | 9 ++++++---
sched/mqueue/mq_receive.c | 10 ----------
sched/mqueue/mq_send.c | 5 -----
sched/mqueue/mq_sndinternal.c | 10 +++-------
sched/mqueue/mq_timedreceive.c | 11 -----------
sched/mqueue/mq_timedsend.c | 14 ++------------
6 files changed, 11 insertions(+), 48 deletions(-)
diff --git a/sched/mqueue/mq_notify.c b/sched/mqueue/mq_notify.c
index 021fa41402..378dbfd81b 100644
--- a/sched/mqueue/mq_notify.c
+++ b/sched/mqueue/mq_notify.c
@@ -29,6 +29,7 @@
#include <string.h>
#include <errno.h>
+#include <nuttx/irq.h>
#include <nuttx/sched.h>
#include "sched/sched.h"
@@ -97,11 +98,13 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
FAR struct inode *inode;
FAR struct file *filep;
FAR struct tcb_s *rtcb;
+ irqstate_t flags;
int errval;
errval = fs_getfilep(mqdes, &filep);
if (errval < 0)
{
+ errval = -errval;
goto errout_without_lock;
}
@@ -119,7 +122,7 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
/* Get a pointer to the message queue */
- sched_lock();
+ flags = enter_critical_section();
/* Get the current process ID */
@@ -177,11 +180,11 @@ int mq_notify(mqd_t mqdes, FAR const struct sigevent *notification)
nxsig_cancel_notification(&msgq->ntwork);
}
- sched_unlock();
+ leave_critical_section(flags);
return OK;
errout:
- sched_unlock();
+ leave_critical_section(flags);
errout_without_lock:
set_errno(errval);
diff --git a/sched/mqueue/mq_receive.c b/sched/mqueue/mq_receive.c
index 9586e641e8..1bd1c86546 100644
--- a/sched/mqueue/mq_receive.c
+++ b/sched/mqueue/mq_receive.c
@@ -99,15 +99,6 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
return ret;
}
- /* Get the next message from the message queue. We will disable
- * pre-emption until we have completed the message received. This
- * is not too bad because if the receipt takes a long time, it will
- * be because we are blocked waiting for a message and pre-emption
- * will be re-enabled while we are blocked
- */
-
- sched_lock();
-
/* Furthermore, nxmq_wait_receive() expects to have interrupts disabled
* because messages can be sent from interrupt level.
*/
@@ -132,7 +123,6 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
}
- sched_unlock();
return ret;
}
diff --git a/sched/mqueue/mq_send.c b/sched/mqueue/mq_send.c
index 1d47811b20..320d908ea7 100644
--- a/sched/mqueue/mq_send.c
+++ b/sched/mqueue/mq_send.c
@@ -94,10 +94,6 @@ int file_mq_send(FAR struct file *mq, FAR const char *msg, size_t msglen,
return ret;
}
- /* Get a pointer to the message queue */
-
- sched_lock();
-
/* Allocate a message structure:
* - Immediately if we are called from an interrupt handler.
* - Immediately if the message queue is not full, or
@@ -156,7 +152,6 @@ int file_mq_send(FAR struct file *mq, FAR const char *msg, size_t msglen,
ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
}
- sched_unlock();
return ret;
}
diff --git a/sched/mqueue/mq_sndinternal.c b/sched/mqueue/mq_sndinternal.c
index 3c4b6e2d85..f71c7bd077 100644
--- a/sched/mqueue/mq_sndinternal.c
+++ b/sched/mqueue/mq_sndinternal.c
@@ -327,10 +327,6 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
FAR struct mqueue_msg_s *prev;
irqstate_t flags;
- /* Get a pointer to the message queue */
-
- sched_lock();
-
/* Construct the message header info */
mqmsg->priority = prio;
@@ -405,8 +401,9 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
{
/* Find the highest priority task that is waiting for
* this queue to be non-empty in g_waitingformqnotempty
- * list. sched_lock() should give us sufficient protection since
- * interrupts should never cause a change in this list
list. The critical section (entered above via enter_critical_section(),
which disables interrupts) should give us sufficient protection since
interrupts can never cause a change in this list
*/
for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
@@ -425,6 +422,5 @@ int nxmq_do_send(FAR struct mqueue_inode_s *msgq,
}
leave_critical_section(flags);
- sched_unlock();
return OK;
}
diff --git a/sched/mqueue/mq_timedreceive.c b/sched/mqueue/mq_timedreceive.c
index 981bcb0636..72dc8291c2 100644
--- a/sched/mqueue/mq_timedreceive.c
+++ b/sched/mqueue/mq_timedreceive.c
@@ -170,15 +170,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
return -EINVAL;
}
- /* Get the next message from the message queue. We will disable
- * pre-emption until we have completed the message received. This
- * is not too bad because if the receipt takes a long time, it will
- * be because we are blocked waiting for a message and pre-emption
- * will be re-enabled while we are blocked
- */
-
- sched_lock();
-
/* Furthermore, nxmq_wait_receive() expects to have interrupts disabled
* because messages can be sent from interrupt level.
*/
@@ -213,7 +204,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
if (result != OK)
{
leave_critical_section(flags);
- sched_unlock();
return -result;
}
@@ -250,7 +240,6 @@ ssize_t file_mq_timedreceive(FAR struct file *mq, FAR char *msg,
ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
}
- sched_unlock();
return ret;
}
diff --git a/sched/mqueue/mq_timedsend.c b/sched/mqueue/mq_timedsend.c
index 94760b19a7..a26174927d 100644
--- a/sched/mqueue/mq_timedsend.c
+++ b/sched/mqueue/mq_timedsend.c
@@ -185,10 +185,6 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
return -ENOMEM;
}
- /* Get a pointer to the message queue */
-
- sched_lock();
-
/* OpenGroup.org: "Under no circumstance shall the operation fail with a
* timeout if there is sufficient room in the queue to add the message
* immediately. The validity of the abstime parameter need not be checked
@@ -209,9 +205,7 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
* Currently nxmq_do_send() always returns OK.
*/
- ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
- sched_unlock();
- return ret;
+ return nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
}
/* The message queue is full... We are going to wait. Now we must have a
@@ -285,10 +279,7 @@ int file_mq_timedsend(FAR struct file *mq, FAR const char *msg,
* Currently nxmq_do_send() always returns OK.
*/
- ret = nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
-
- sched_unlock();
- return ret;
+ return nxmq_do_send(msgq, mqmsg, msg, msglen, prio);
/* Exit here with (1) a message allocated, (2) a wdog allocated, and
* (3) interrupts disabled. (Note: the scheduler is no longer locked
* here now that sched_lock() has been removed.)
@@ -303,7 +294,6 @@ errout_in_critical_section:
errout_with_mqmsg:
nxmq_free_msg(mqmsg);
- sched_unlock();
return ret;
}