Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support nested interrupts on AArch32 (non-Cortex-M) #23636

Merged
merged 4 commits into from
Apr 2, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 26 additions & 14 deletions arch/arm/core/aarch32/exc_exit.S
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*/
Expand Down Expand Up @@ -73,11 +74,17 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)

#ifdef CONFIG_PREEMPT_ENABLED
ldr r0, =_kernel
ldr r3, =_kernel
ioannisg marked this conversation as resolved.
Show resolved Hide resolved

ldr r1, [r0, #_kernel_offset_to_current]
#ifndef CONFIG_CPU_CORTEX_M
/* Do not context switch if exiting a nested interrupt */
ldr r0, [r3, #_kernel_offset_to_nested]
cmp r0, #1
bhi _EXIT_EXC
#endif /* !CONFIG_CPU_CORTEX_M */

ldr r0, [r0, #_kernel_offset_to_ready_q_cache]
ldr r1, [r3, #_kernel_offset_to_current]
ldr r0, [r3, #_kernel_offset_to_ready_q_cache]
cmp r0, r1
beq _EXIT_EXC

Expand Down Expand Up @@ -113,23 +120,28 @@ _EXIT_EXC:
#if defined(CONFIG_CPU_CORTEX_M)
bx lr
#elif defined(CONFIG_CPU_CORTEX_R)
/* Disable nested interrupts while exiting */
cpsid i

/* Decrement interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
sub r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]

/* Restore previous stack pointer */
pop {r2, r3}
add sp, sp, r3

/*
* Restore r0-r3, r12 and lr stored into the process stack by the mode
* Restore r0-r3, r12 and lr_irq stored into the process stack by the mode
* entry function. These registers are saved by _isr_wrapper for IRQ mode
* and z_arm_svc for SVC mode.
*
* r0-r3 are either the values from the thread before it was switched out
* or they are the args to _new_thread for a new thread.
*/
push {r4-r6}
mrs r6, cpsr

cps #MODE_SYS
ldmia sp!, {r0-r5}
msr cpsr_c, r6

mov r12, r4
mov lr, r5
pop {r4-r6}
movs pc, lr
pop {r0-r3, r12, lr}
rfeia sp!
#endif
70 changes: 53 additions & 17 deletions arch/arm/core/aarch32/isr_wrapper.S
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*/
Expand Down Expand Up @@ -45,20 +46,39 @@ SECTION_FUNC(TEXT, _isr_wrapper)
push {r0,lr} /* r0, lr are now the first items on the stack */
#elif defined(CONFIG_CPU_CORTEX_R)
/*
* Save away r0-r3 from previous context to the process stack since
* they are clobbered here. Also, save away lr since we may swap
* processes and return to a different thread.
* Save away r0-r3, r12 and lr_irq for the previous context to the
* process stack since they are clobbered here. Also, save away lr
* and spsr_irq since we may swap processes and return to a different
* thread.
*/
push {r4, r5}
mov r4, r12
sub r5, lr, #4

sub lr, lr, #4
srsdb #MODE_SYS!
cps #MODE_SYS
stmdb sp!, {r0-r5}
cps #MODE_IRQ
push {r0-r3, r12, lr}

pop {r4, r5}
#endif
/*
* Use SVC mode stack for predictable interrupt behaviour; running ISRs
* in the SYS/USR mode stack (i.e. interrupted thread stack) leaves the
* ISR stack usage at the mercy of the interrupted thread and this can
* be prone to stack overflows if any of the ISRs and/or preemptible
* threads have high stack usage.
*
* When userspace is enabled, this also prevents leaking privileged
* information to the user mode.
*/
cps #MODE_SVC

/* Align stack at double-word boundary */
and r3, sp, #4
sub sp, sp, r3
push {r2, r3}

/* Increment interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
add r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]
#endif /* CONFIG_CPU_CORTEX_M */

#ifdef CONFIG_EXECUTION_BENCHMARKING
bl read_timer_start_of_isr
Expand All @@ -78,12 +98,13 @@ SECTION_FUNC(TEXT, _isr_wrapper)
* is called with interrupts disabled.
*/

/*
* FIXME: Remove the Cortex-M conditional compilation checks for `cpsid i`
* and `cpsie i` after the Cortex-R port is updated to support
* interrupt nesting. For more details, refer to the issue #21758.
*/
#if defined(CONFIG_CPU_CORTEX_M)
/*
* Disable interrupts to prevent nesting while exiting idle state. This
* is only necessary for the Cortex-M because it is the only ARM
* architecture variant that automatically enables interrupts when
* entering an ISR.
*/
cpsid i /* PRIMASK = 1 */
#endif

Expand Down Expand Up @@ -146,6 +167,21 @@ _idle_state_cleared:
#else
#error Unknown ARM architecture
#endif /* CONFIG_CPU_CORTEX_M */

#if !defined(CONFIG_CPU_CORTEX_M)
/*
* Enable interrupts to allow nesting.
*
* Note that interrupts are disabled up to this point on the ARM
* architecture variants other than the Cortex-M. It is also important
 * to note that most interrupt controllers require that the nested
 * interrupts are handled after the active interrupt is acknowledged;
 * this is done through the `get_active` interrupt controller
* interface function.
*/
cpsie i
#endif /* !CONFIG_CPU_CORTEX_M */

ldr r1, =_sw_isr_table
add r1, r1, r0 /* table entry: ISRs must have their MSB set to stay
* in thumb mode */
Expand Down Expand Up @@ -186,7 +222,7 @@ _idle_state_cleared:
pop {r0, lr}
#elif defined(CONFIG_ARMV7_R)
/*
* r0,lr were saved on the process stack since a swap could
* r0 and lr_irq were saved on the process stack since a swap could
* happen. exc_exit will handle getting those values back
* from the process stack to return to the correct location
* so there is no need to do anything here.
Expand Down
31 changes: 20 additions & 11 deletions arch/arm/core/aarch32/swap_helper.S
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
* Copyright (c) 2017-2019 Nordic Semiconductor ASA.
* Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*/
Expand Down Expand Up @@ -110,8 +111,9 @@ out_fp_endif:
#endif /* CONFIG_FP_SHARING */
#elif defined(CONFIG_ARMV7_R)
/* Store rest of process context */
mrs r12, SPSR
stm r0, {r4-r12,sp,lr}^
cps #MODE_SYS
stm r0, {r4-r11, sp}
cps #MODE_SVC
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
Expand Down Expand Up @@ -315,9 +317,10 @@ _thread_irq_disabled:
ldr r0, =_thread_offset_to_callee_saved
add r0, r2

/* restore r4-r12 for incoming thread, plus system sp and lr */
ldm r0, {r4-r12,sp,lr}^
msr SPSR_fsxc, r12
/* restore r4-r11 and sp for incoming thread */
cps #MODE_SYS
ldm r0, {r4-r11, sp}
cps #MODE_SVC
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
Expand Down Expand Up @@ -585,15 +588,21 @@ SECTION_FUNC(TEXT, z_arm_svc)
* Save r12 and the lr as we could be swapping in another process and
* returning to a different location.
*/
push {r4, r5}
mov r4, r12
mov r5, lr

srsdb #MODE_SYS!
cps #MODE_SYS
stmdb sp!, {r0-r5}
push {r0-r3, r12, lr}
cps #MODE_SVC

pop {r4, r5}
/* Align stack at double-word boundary */
and r3, sp, #4
sub sp, sp, r3
push {r2, r3}

/* Increment interrupt nesting count */
ldr r2, =_kernel
ldr r0, [r2, #_kernel_offset_to_nested]
add r0, r0, #1
str r0, [r2, #_kernel_offset_to_nested]

/* Get SVC number */
mrs r0, spsr
Expand Down
16 changes: 8 additions & 8 deletions arch/arm/core/aarch32/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -125,19 +125,19 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
pInitCtx->basic.a2 = (u32_t)parameter1;
pInitCtx->basic.a3 = (u32_t)parameter2;
pInitCtx->basic.a4 = (u32_t)parameter3;

#if defined(CONFIG_CPU_CORTEX_M)
pInitCtx->basic.xpsr =
0x01000000UL; /* clear all, thumb bit is 1, even if RO */
#else
pInitCtx->basic.xpsr = A_BIT | MODE_SYS;
#if defined(CONFIG_COMPILER_ISA_THUMB2)
pInitCtx->basic.xpsr |= T_BIT;
#endif /* CONFIG_COMPILER_ISA_THUMB2 */
#endif /* CONFIG_CPU_CORTEX_M */

thread->callee_saved.psp = (u32_t)pInitCtx;
#if defined(CONFIG_CPU_CORTEX_R)
pInitCtx->basic.lr = (u32_t)pInitCtx->basic.pc;
thread->callee_saved.spsr = A_BIT | MODE_SYS;
#if defined(CONFIG_COMPILER_ISA_THUMB2)
thread->callee_saved.spsr |= T_BIT;
#endif

thread->callee_saved.lr = (u32_t)pInitCtx->basic.pc;
#endif /* CONFIG_CPU_CORTEX_R */
thread->arch.basepri = 0;

#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
Expand Down
4 changes: 0 additions & 4 deletions arch/arm/core/offsets/offsets_aarch32.c
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,6 @@ GEN_OFFSET_SYM(_callee_saved_t, v6);
GEN_OFFSET_SYM(_callee_saved_t, v7);
GEN_OFFSET_SYM(_callee_saved_t, v8);
GEN_OFFSET_SYM(_callee_saved_t, psp);
#if defined(CONFIG_CPU_CORTEX_R)
GEN_OFFSET_SYM(_callee_saved_t, spsr);
GEN_OFFSET_SYM(_callee_saved_t, lr);
#endif

/* size of the entire preempt registers structure */

Expand Down
11 changes: 1 addition & 10 deletions arch/arm/include/aarch32/cortex_a_r/exc.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,16 +35,7 @@ extern volatile irq_offload_routine_t offload_routine;
/* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
static ALWAYS_INLINE bool arch_is_in_isr(void)
{
unsigned int status;

__asm__ volatile(
" mrs %0, cpsr"
: "=r" (status) : : "memory", "cc");
status &= MODE_MASK;

return (status == MODE_FIQ) ||
(status == MODE_IRQ) ||
(status == MODE_SVC);
return (_kernel.nested != 0);
}

/**
Expand Down
2 changes: 1 addition & 1 deletion boards/arm/qemu_cortex_r5/board.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ set(QEMU_FLAGS_${ARCH}
)

set(QEMU_KERNEL_OPTION
"-device;loader,file=$<TARGET_FILE:zephyr_final>,cpu-num=4"
"-device;loader,file=\$<TARGET_FILE:\${logical_target_for_zephyr_elf}>,cpu-num=4"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Interesting, I can't recall seeing this before.

Another tool in the toolbox.

"-device;loader,addr=0xff5e023c,data=0x80008fde,data-len=4"
"-device;loader,addr=0xff9a0000,data=0x80000218,data-len=4"
)
Expand Down
1 change: 0 additions & 1 deletion boards/arm/qemu_cortex_r5/qemu_cortex_r5.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,5 @@ testing:
default: true
ignore_tags:
- benchmark
- interrupt
- memory_protection
- userspace
2 changes: 2 additions & 0 deletions cmake/emu/qemu.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,8 @@ if(DEFINED QEMU_KERNEL_FILE)
set(QEMU_KERNEL_OPTION "-kernel;${QEMU_KERNEL_FILE}")
elseif(NOT DEFINED QEMU_KERNEL_OPTION)
set(QEMU_KERNEL_OPTION "-kernel;$<TARGET_FILE:${logical_target_for_zephyr_elf}>")
elseif(DEFINED QEMU_KERNEL_OPTION)
string(CONFIGURE "${QEMU_KERNEL_OPTION}" QEMU_KERNEL_OPTION)
endif()

foreach(target ${qemu_targets})
Expand Down
6 changes: 0 additions & 6 deletions include/arch/arm/aarch32/thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,7 @@ struct _callee_saved {
u32_t v6; /* r9 */
u32_t v7; /* r10 */
u32_t v8; /* r11 */
#if defined(CONFIG_CPU_CORTEX_R)
u32_t spsr;/* r12 */
u32_t psp; /* r13 */
u32_t lr; /* r14 */
#else
u32_t psp; /* r13 */
#endif
};

typedef struct _callee_saved _callee_saved_t;
Expand Down
5 changes: 1 addition & 4 deletions tests/kernel/interrupt/src/nested_irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,9 @@

/*
* Run the nested interrupt test for the supported platforms only.
*
* NOTE: Cortex-R platforms with the ARM GIC are skipped because the arch port
* for these architectures do not support interrupt nesting yet.
*/
#if defined(CONFIG_CPU_CORTEX_M) || defined(CONFIG_CPU_ARCV2) || \
(defined(CONFIG_GIC) && !defined(CONFIG_CPU_CORTEX_R))
defined(CONFIG_GIC)
#define TEST_NESTED_ISR
#endif

Expand Down