You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@nuttx.apache.org by ma...@apache.org on 2022/02/23 06:04:42 UTC
[incubator-nuttx] 02/04: arch/armv7-r: unify switch context from software interrupt
This is an automated email from the ASF dual-hosted git repository.
masayuki pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git
commit db3a40ac25d0845e9118746bfdf996846a368728
Author: chao.an <an...@xiaomi.com>
AuthorDate: Wed Feb 16 13:01:47 2022 +0800
arch/armv7-r: unify switch context from software interrupt
Signed-off-by: chao.an <an...@xiaomi.com>
---
arch/arm/src/armv7-r/arm_blocktask.c | 27 +++++-----
arch/arm/src/armv7-r/arm_fullcontextrestore.S | 74 ---------------------------
arch/arm/src/armv7-r/arm_releasepending.c | 28 +++++-----
arch/arm/src/armv7-r/arm_reprioritizertr.c | 29 ++++++-----
arch/arm/src/armv7-r/arm_syscall.c | 50 ++++++++++++------
arch/arm/src/armv7-r/arm_unblocktask.c | 29 +++++------
arch/arm/src/armv7-r/svcall.h | 68 +++++++++++++-----------
7 files changed, 129 insertions(+), 176 deletions(-)
diff --git a/arch/arm/src/armv7-r/arm_blocktask.c b/arch/arm/src/armv7-r/arm_blocktask.c
index 6dafcc1..7e5dacb 100644
--- a/arch/arm/src/armv7-r/arm_blocktask.c
+++ b/arch/arm/src/armv7-r/arm_blocktask.c
@@ -123,26 +123,27 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
arm_restorestate(rtcb->xcp.regs);
}
- /* Copy the user C context into the TCB at the (old) head of the
- * ready-to-run Task list. if arm_saveusercontext returns a non-zero
- * value, then this is really the previously running task restarting!
- */
+ /* No, then we will need to perform the user context switch */
- else if (!arm_saveusercontext(rtcb->xcp.regs))
+ else
{
- /* Restore the exception context of the rtcb at the (new) head
- * of the ready-to-run task list.
- */
-
- rtcb = this_task();
+ struct tcb_s *nexttcb = this_task();
/* Reset scheduler parameters */
- nxsched_resume_scheduler(rtcb);
+ nxsched_resume_scheduler(nexttcb);
+
+ /* Switch context to the context of the task at the head of the
+ * ready to run list.
+ */
- /* Then switch contexts */
+ arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
- arm_fullcontextrestore(rtcb->xcp.regs);
+ /* arm_switchcontext forces a context switch to the task at the
+ * head of the ready-to-run list. It does not 'return' in the
+ * normal sense. When it does return, it is because the blocked
+ * task is again ready to run and has execution priority.
+ */
}
}
}
diff --git a/arch/arm/src/armv7-r/arm_fullcontextrestore.S b/arch/arm/src/armv7-r/arm_fullcontextrestore.S
index 1d806be..9af7219 100644
--- a/arch/arm/src/armv7-r/arm_fullcontextrestore.S
+++ b/arch/arm/src/armv7-r/arm_fullcontextrestore.S
@@ -62,44 +62,6 @@
.type arm_fullcontextrestore, function
arm_fullcontextrestore:
- /* On entry, a1 (r0) holds address of the register save area. All other
- * registers are available for use.
- */
-
-#ifdef CONFIG_ARCH_FPU
- /* First, restore the floating point registers. Lets do this before we
- * restore the ARM registers so that we have plenty of registers to
- * work with.
- */
-
- add r1, r0, #(4*REG_S0) /* r1=Address of FP register storage */
-
- /* Load all floating point registers. Registers are loaded in numeric order,
- * s0, s1, ... in increasing address order.
- */
-
-#ifdef CONFIG_ARM_HAVE_FPU_D32
- vldmia.64 r1!, {d0-d15} /* Restore the full FP context */
- vldmia.64 r1!, {d16-d31}
-#else
- vldmia r1!, {s0-s31} /* Restore the full FP context */
-#endif
-
- /* Load the floating point control and status register. At the end of the
- * vstmia, r1 will point to the FPSCR storage location.
- */
-
- ldr r2, [r1], #4 /* Fetch the floating point control and status register */
- vmsr fpscr, r2 /* Restore the FPSCR */
-#endif
-
-#ifdef CONFIG_BUILD_PROTECTED
- /* For the protected build, we need to be able to transition gracefully
- * between kernel- and user-mode tasks. Here we do that with a system
- * call; the system call will execute in kernel mode and but can return
- * to either user or kernel mode.
- */
-
/* Perform the System call with R0=1 and R1=regs */
mov r1, r0 /* R1: regs */
@@ -109,41 +71,5 @@ arm_fullcontextrestore:
/* This call should not return */
bx lr /* Unnecessary ... will not return */
-
-#else
- /* For a flat build, we can do all of this here... Just think of this as
- * a longjmp() all on steroids.
- */
-
- /* Recover all registers except for r0, r1, R15, and CPSR */
-
- add r1, r0, #(4*REG_R2) /* Offset to REG_R2 storage */
- ldmia r1, {r2-r14} /* Recover registers */
-
- /* Create a stack frame to hold the some registers */
-
- sub sp, sp, #(3*4) /* Frame for three registers */
- ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
- str r1, [sp] /* Save it at the top of the stack */
- ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
- str r1, [sp, #4] /* Save it in the stack */
- ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
- str r1, [sp, #8] /* Save it at the bottom of the frame */
-
- /* Now we can restore the CPSR. We wait until we are completely
- * finished with the context save data to do this. Restore the CPSR
- * may re-enable and interrupts and we could be in a context
- * where the save structure is only protected by interrupts being
- * disabled.
- */
-
- ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
- msr spsr_cxsf, r1 /* Set the SPSR */
-
- /* Now recover r0-r1, pc and cpsr, destroying the stack frame */
-
- ldmia sp!, {r0-r1, pc}^
-#endif
-
.size arm_fullcontextrestore, .-arm_fullcontextrestore
.end
diff --git a/arch/arm/src/armv7-r/arm_releasepending.c b/arch/arm/src/armv7-r/arm_releasepending.c
index 689c355..43b78f2 100644
--- a/arch/arm/src/armv7-r/arm_releasepending.c
+++ b/arch/arm/src/armv7-r/arm_releasepending.c
@@ -93,27 +93,27 @@ void up_release_pending(void)
arm_restorestate(rtcb->xcp.regs);
}
- /* Copy the exception context into the TCB of the task that
- * was currently active. if arm_saveusercontext returns a non-zero
- * value, then this is really the previously running task
- * restarting!
- */
+ /* No, then we will need to perform the user context switch */
- else if (!arm_saveusercontext(rtcb->xcp.regs))
+ else
{
- /* Restore the exception context of the rtcb at the (new) head
- * of the ready-to-run task list.
- */
-
- rtcb = this_task();
+ struct tcb_s *nexttcb = this_task();
/* Update scheduler parameters */
- nxsched_resume_scheduler(rtcb);
+ nxsched_resume_scheduler(nexttcb);
- /* Then switch contexts */
+ /* Switch context to the context of the task at the head of the
+ * ready to run list.
+ */
- arm_fullcontextrestore(rtcb->xcp.regs);
+ arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+ /* arm_switchcontext forces a context switch to the task at the
+ * head of the ready-to-run list. It does not 'return' in the
+ * normal sense. When it does return, it is because the blocked
+ * task is again ready to run and has execution priority.
+ */
}
}
}
diff --git a/arch/arm/src/armv7-r/arm_reprioritizertr.c b/arch/arm/src/armv7-r/arm_reprioritizertr.c
index 44c41b6..6852177 100644
--- a/arch/arm/src/armv7-r/arm_reprioritizertr.c
+++ b/arch/arm/src/armv7-r/arm_reprioritizertr.c
@@ -147,27 +147,28 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
arm_restorestate(rtcb->xcp.regs);
}
- /* Copy the exception context into the TCB at the (old) head of
- * the ready-to-run Task list. if arm_saveusercontext returns a
- * non-zero value, then this is really the previously running task
- * restarting!
- */
+ /* No, then we will need to perform the user context switch */
- else if (!arm_saveusercontext(rtcb->xcp.regs))
+ else
{
- /* Restore the exception context of the rtcb at the (new) head
- * of the ready-to-run task list.
- */
-
- rtcb = this_task();
+ struct tcb_s *nexttcb = this_task();
/* Update scheduler parameters */
- nxsched_resume_scheduler(rtcb);
+ nxsched_resume_scheduler(nexttcb);
- /* Then switch contexts */
+ /* Switch context to the context of the task at the head of the
+ * ready to run list.
+ */
- arm_fullcontextrestore(rtcb->xcp.regs);
+ arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+
+ /* arm_switchcontext forces a context switch to the task at the
+ * head of the ready-to-run list. It does not 'return' in the
+ * normal sense. When it does return, it is because the
+ * blocked task is again ready to run and has execution
+ * priority.
+ */
}
}
}
diff --git a/arch/arm/src/armv7-r/arm_syscall.c b/arch/arm/src/armv7-r/arm_syscall.c
index 06ddc27..719d270 100644
--- a/arch/arm/src/armv7-r/arm_syscall.c
+++ b/arch/arm/src/armv7-r/arm_syscall.c
@@ -120,7 +120,6 @@ static void dispatch_syscall(void)
*
****************************************************************************/
-#ifdef CONFIG_LIB_SYSCALL
uint32_t *arm_syscall(uint32_t *regs)
{
uint32_t cmd;
@@ -165,6 +164,7 @@ uint32_t *arm_syscall(uint32_t *regs)
* unprivileged thread mode.
*/
+#ifdef CONFIG_LIB_SYSCALL
case SYS_syscall_return:
{
FAR struct tcb_s *rtcb = nxsched_self();
@@ -213,6 +213,7 @@ uint32_t *arm_syscall(uint32_t *regs)
(void)nxsig_unmask_pendingsignal();
}
break;
+#endif
/* R0=SYS_restore_context: Restore task context
*
@@ -225,7 +226,6 @@ uint32_t *arm_syscall(uint32_t *regs)
* R1 = restoreregs
*/
-#ifdef CONFIG_BUILD_PROTECTED
case SYS_restore_context:
{
/* Replace 'regs' with the pointer to the register set in
@@ -233,11 +233,40 @@ uint32_t *arm_syscall(uint32_t *regs)
* set will determine the restored context.
*/
+ arm_restorefpu((uint32_t *)regs[REG_R1]);
regs = (uint32_t *)regs[REG_R1];
DEBUGASSERT(regs);
}
break;
+
+ /* R0=SYS_switch_context: This is a switch context command:
+ *
+ * void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
+ *
+ * At this point, the following values are saved in context:
+ *
+ * R0 = SYS_switch_context
+ * R1 = saveregs
+ * R2 = restoreregs
+ *
+ * In this case, we do both: We save the context registers to the save
+ * register area referenced by the saved contents of R1 and then set
+ * regs to the save register area referenced by the saved
+ * contents of R2.
+ */
+
+ case SYS_switch_context:
+ {
+ DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0);
+#if defined(CONFIG_ARCH_FPU)
+ arm_copyarmstate((uint32_t *)regs[REG_R1], regs);
+ arm_restorefpu((uint32_t *)regs[REG_R2]);
+#else
+ memcpy((uint32_t *)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
#endif
+ regs = (uint32_t *)regs[REG_R2];
+ }
+ break;
/* R0=SYS_task_start: This is a user task start
*
@@ -452,9 +481,6 @@ uint32_t *arm_syscall(uint32_t *regs)
/* Indicate that we are in a syscall handler. */
rtcb->flags |= TCB_FLAG_SYSCALL;
-#else
- svcerr("ERROR: Bad SYS call: %d\n", regs[REG_R0]);
-#endif
#ifdef CONFIG_ARCH_KERNEL_STACK
/* If this is the first SYSCALL and if there is an allocated
@@ -472,6 +498,9 @@ uint32_t *arm_syscall(uint32_t *regs)
/* Save the new SYSCALL nesting level */
rtcb->xcp.nsyscalls = index + 1;
+#else
+ svcerr("ERROR: Bad SYS call: 0x%" PRIx32 "\n", regs[REG_R0]);
+#endif
}
break;
}
@@ -494,14 +523,3 @@ uint32_t *arm_syscall(uint32_t *regs)
return regs;
}
-
-#else
-
-uint32_t *arm_syscall(uint32_t *regs)
-{
- _alert("SYSCALL from 0x%x\n", regs[REG_PC]);
- CURRENT_REGS = regs;
- PANIC();
-}
-
-#endif
diff --git a/arch/arm/src/armv7-r/arm_unblocktask.c b/arch/arm/src/armv7-r/arm_unblocktask.c
index 0d4d489..d10d7a2 100644
--- a/arch/arm/src/armv7-r/arm_unblocktask.c
+++ b/arch/arm/src/armv7-r/arm_unblocktask.c
@@ -121,28 +121,27 @@ void up_unblock_task(struct tcb_s *tcb)
arm_restorestate(rtcb->xcp.regs);
}
- /* We are not in an interrupt handler. Copy the user C context
- * into the TCB of the task that was previously active. if
- * arm_saveusercontext returns a non-zero value, then this is really
- * the previously running task restarting!
- */
+ /* No, then we will need to perform the user context switch */
- else if (!arm_saveusercontext(rtcb->xcp.regs))
+ else
{
- /* Restore the exception context of the new task that is ready to
- * run (probably tcb). This is the new rtcb at the head of the
- * ready-to-run task list.
- */
-
- rtcb = this_task();
+ struct tcb_s *nexttcb = this_task();
/* Update scheduler parameters */
- nxsched_resume_scheduler(rtcb);
+ nxsched_resume_scheduler(nexttcb);
+
+ /* Switch context to the context of the task at the head of the
+ * ready to run list.
+ */
- /* Then switch contexts */
+ arm_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
- arm_fullcontextrestore(rtcb->xcp.regs);
+ /* arm_switchcontext forces a context switch to the task at the
+ * head of the ready-to-run list. It does not 'return' in the
+ * normal sense. When it does return, it is because the blocked
+ * task is again ready to run and has execution priority.
+ */
}
}
}
diff --git a/arch/arm/src/armv7-r/svcall.h b/arch/arm/src/armv7-r/svcall.h
index 2470da7..b0efecd 100644
--- a/arch/arm/src/armv7-r/svcall.h
+++ b/arch/arm/src/armv7-r/svcall.h
@@ -29,8 +29,6 @@
#include <syscall.h>
-#ifdef CONFIG_LIB_SYSCALL
-
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
@@ -43,17 +41,19 @@
* more syscall values must be reserved.
*/
-#ifdef CONFIG_BUILD_PROTECTED
-# ifndef CONFIG_SYS_RESERVED
-# error "CONFIG_SYS_RESERVED must be defined to have the value 6"
-# elif CONFIG_SYS_RESERVED != 6
-# error "CONFIG_SYS_RESERVED must have the value 6"
-# endif
-#else
-# ifndef CONFIG_SYS_RESERVED
-# error "CONFIG_SYS_RESERVED must be defined to have the value 1"
-# elif CONFIG_SYS_RESERVED != 1
-# error "CONFIG_SYS_RESERVED must have the value 1"
+#ifdef CONFIG_LIB_SYSCALL
+# ifdef CONFIG_BUILD_KERNEL
+# ifndef CONFIG_SYS_RESERVED
+# error "CONFIG_SYS_RESERVED must be defined to have the value 7"
+# elif CONFIG_SYS_RESERVED != 7
+# error "CONFIG_SYS_RESERVED must have the value 7"
+# endif
+# else
+# ifndef CONFIG_SYS_RESERVED
+# error "CONFIG_SYS_RESERVED must be defined to have the value 4"
+# elif CONFIG_SYS_RESERVED != 4
+# error "CONFIG_SYS_RESERVED must have the value 4"
+# endif
# endif
#endif
@@ -61,60 +61,68 @@
/* SYS call 0:
*
- * void arm_syscall_return(void);
+ * void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
*/
-#define SYS_syscall_return (0)
+#define SYS_restore_context (0)
-#ifndef CONFIG_BUILD_FLAT
-#ifdef CONFIG_BUILD_PROTECTED
/* SYS call 1:
*
- * void arm_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
+ * void arm_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
*/
-#define SYS_restore_context (1)
+#define SYS_switch_context (1)
+#ifdef CONFIG_LIB_SYSCALL
/* SYS call 2:
*
+ * void arm_syscall_return(void);
+ */
+
+#define SYS_syscall_return (2)
+
+#ifndef CONFIG_BUILD_FLAT
+#ifdef CONFIG_BUILD_KERNEL
+/* SYS call 3:
+ *
* void up_task_start(main_t taskentry, int argc, FAR char *argv[])
* noreturn_function;
*/
-#define SYS_task_start (2)
+#define SYS_task_start (3)
-/* SYS call 4:
+/* SYS call 5:
*
- * void signal_handler(_sa_sigaction_t sighand, int signo,
- * FAR siginfo_t *info,
+ * void signal_handler(_sa_sigaction_t sighand,
+ * int signo, FAR siginfo_t *info,
* FAR void *ucontext);
*/
-#define SYS_signal_handler (4)
+#define SYS_signal_handler (5)
-/* SYS call 5:
+/* SYS call 6:
*
* void signal_handler_return(void);
*/
-#define SYS_signal_handler_return (5)
+#define SYS_signal_handler_return (6)
-#endif /* CONFIG_BUILD_PROTECTED */
+#endif /* !CONFIG_BUILD_FLAT */
-/* SYS call 3:
+/* SYS call 4:
*
* void up_pthread_start(pthread_startroutine_t startup,
* pthread_startroutine_t entrypt, pthread_addr_t arg)
* noreturn_function
*/
-#define SYS_pthread_start (3)
+#define SYS_pthread_start (4)
#endif /* !CONFIG_BUILD_FLAT */
+#endif /* CONFIG_LIB_SYSCALL */
/****************************************************************************
* Inline Functions
****************************************************************************/
-#endif /* CONFIG_LIB_SYSCALL */
#endif /* __ARCH_ARM_SRC_ARMV7_R_SVCALL_H */