arm64: errata: Add workaround for Cortex-A76 erratum #1463225
Revisions of the Cortex-A76 CPU prior to r4p0 are affected by an erratum
that can prevent interrupts from being taken when single-stepping.

This patch implements a software workaround to prevent userspace from
effectively being able to disable interrupts.

Cc: <stable@vger.kernel.org>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
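
For context, the triggering scenario is a debugger single-stepping a task through
an SVC (system call) instruction. The sketch below is a minimal userspace
illustration of that scenario using the standard ptrace(2) interface; it is not
part of the patch, and the stepped getpid() call is just an arbitrary example of
a system call:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                /* Tracee: request tracing, stop, then issue an SVC. */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                syscall(SYS_getpid);    /* the single-stepped system call */
                _exit(0);
        }

        /* Tracer: wait for the initial stop, then step the child to exit. */
        waitpid(child, NULL, 0);
        for (;;) {
                int status;

                if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) != 0)
                        break;
                waitpid(child, &status, 0);
                if (WIFEXITED(status))
                        break;
        }
        return 0;
}
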
wildea01 committed May 23, 2019
1 parent 3e29ead commit 969f5ea
Showing 6 changed files with 109 additions and 1 deletion.
1 change: 1 addition & 0 deletions Documentation/arm64/silicon-errata.txt
@@ -62,6 +62,7 @@ stable kernels.
| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
18 changes: 18 additions & 0 deletions arch/arm64/Kconfig
@@ -520,6 +520,24 @@ config ARM64_ERRATUM_1286807

          If unsure, say Y.

config ARM64_ERRATUM_1463225
        bool "Cortex-A76: Software Step might prevent interrupt recognition"
        default y
        help
          This option adds a workaround for Arm Cortex-A76 erratum 1463225.

          On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
          of a system call instruction (SVC) can prevent recognition of
          subsequent interrupts when software stepping is disabled in the
          exception handler of the system call and either kernel debugging
          is enabled or VHE is in use.

          Work around the erratum by triggering a dummy step exception
          when handling a system call from a task that is being stepped
          in a VHE configuration of the kernel.

          If unsure, say Y.

config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
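
To relate the "r0p0 to r3p1" window in the help text above to a running system,
here is a rough userspace sketch that reads MIDR_EL1 for CPU0 and decodes it. It
assumes the kernel exposes the register under
/sys/devices/system/cpu/cpu0/regs/identification/midr_el1 (arm64 kernels
generally do) and is illustrative only, not part of this change:

#include <inttypes.h>
#include <stdio.h>

/* MIDR_EL1: implementer [31:24], variant [23:20], part [15:4], revision [3:0] */
static int is_affected_a76(uint64_t midr)
{
        unsigned int implementer = (midr >> 24) & 0xff;  /* 0x41 = Arm */
        unsigned int variant     = (midr >> 20) & 0xf;   /* the "rX" in rXpY */
        unsigned int partnum     = (midr >> 4) & 0xfff;  /* 0xd0b = Cortex-A76 */
        unsigned int revision    = midr & 0xf;           /* the "pY" in rXpY */

        if (implementer != 0x41 || partnum != 0xd0b)
                return 0;

        /* r0p0 .. r3p1 */
        return variant < 3 || (variant == 3 && revision <= 1);
}

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
        uint64_t midr = 0;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%" SCNx64, &midr) != 1) {
                perror(path);
                return 1;
        }
        fclose(f);

        printf("MIDR_EL1 %#" PRIx64 ": %s\n", midr,
               is_affected_a76(midr) ? "Cortex-A76 r0p0..r3p1, erratum applies"
                                     : "not in the affected range");
        return 0;
}
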
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpucaps.h
@@ -62,7 +62,8 @@
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
#define ARM64_WORKAROUND_1463225 44

-#define ARM64_NCAPS 44
+#define ARM64_NCAPS 45

#endif /* __ASM_CPUCAPS_H */
24 changes: 24 additions & 0 deletions arch/arm64/kernel/cpu_errata.c
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        u32 midr = read_cpuid_id();
        /* Cortex-A76 r0p0 - r3p1 */
        struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
@@ -823,6 +839,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_1165522,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
#endif
        {
        }
31 changes: 31 additions & 0 deletions arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
        u32 reg, val;

        if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
                return;

        if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
                return;

        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
        reg = read_sysreg(mdscr_el1);
        val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
        write_sysreg(val, mdscr_el1);
        asm volatile("msr daifclr, #8");
        isb();

        /* We will have taken a single-step exception by this point */

        write_sysreg(reg, mdscr_el1);
        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
#else
static void cortex_a76_erratum_1463225_svc_handler(void) { }
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
                           const syscall_fn_t syscall_table[])
{
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        regs->orig_x0 = regs->regs[0];
        regs->syscallno = scno;

        cortex_a76_erratum_1463225_svc_handler();
        local_daif_restore(DAIF_PROCCTX);
        user_exit();

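
A note on the "msr daifclr, #8" in the handler above: the 4-bit immediate is a
mask over the PSTATE exception-mask bits, with D, A, I and F at bit positions 3
down to 0, so #8 unmasks only debug exceptions. A tiny standalone illustration
(not kernel code):

#include <assert.h>
#include <stdio.h>

/* DAIFSet/DAIFClr immediate encoding: D = bit 3, A = bit 2, I = bit 1, F = bit 0 */
enum {
        DAIF_MASK_F = 1 << 0,   /* FIQ */
        DAIF_MASK_I = 1 << 1,   /* IRQ */
        DAIF_MASK_A = 1 << 2,   /* SError */
        DAIF_MASK_D = 1 << 3,   /* Debug */
};

int main(void)
{
        unsigned int imm = 8;   /* the immediate used by the workaround */

        assert(imm == DAIF_MASK_D);     /* only the debug mask is cleared */
        printf("daifclr #%u clears: %s%s%s%s\n", imm,
               imm & DAIF_MASK_D ? "D " : "", imm & DAIF_MASK_A ? "A " : "",
               imm & DAIF_MASK_I ? "I " : "", imm & DAIF_MASK_F ? "F" : "");
        return 0;
}
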
33 changes: 33 additions & 0 deletions arch/arm64/mm/fault.c
@@ -810,13 +810,46 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (user_mode(regs))
                return 0;

        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return 0;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return 1;
}
#else
static int __exception
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
                                               unsigned int esr,
                                               struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_debug_fault_info(esr);
        unsigned long pc = instruction_pointer(regs);

        if (cortex_a76_erratum_1463225_debug_handler(regs))
                return;

        /*
         * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
         * already disabled to preserve the last enabled/disabled addresses.
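
For reference, PSR_D_BIT above is the debug mask bit of the saved PSTATE, which
sits at bit 9 of the SPSR image (the mask bits there are F, I, A, D at bits 6 to
9, unlike the 4-bit daifclr immediate used in syscall.c). The following sketch
mirrors the arm64 uapi PSR_*_BIT values rather than quoting this patch:

#include <assert.h>

/* Saved-PSTATE (SPSR_ELx) exception-mask bits, as in arm64 uapi <asm/ptrace.h> */
#define PSR_F_BIT 0x00000040u   /* bit 6: FIQ mask */
#define PSR_I_BIT 0x00000080u   /* bit 7: IRQ mask */
#define PSR_A_BIT 0x00000100u   /* bit 8: SError mask */
#define PSR_D_BIT 0x00000200u   /* bit 9: Debug mask */

int main(void)
{
        /* Setting PSR_D_BIT in regs->pstate makes the return to the SVC path
         * run with debug exceptions masked, so restoring MDSCR_EL1 there does
         * not immediately retrigger a step exception. */
        assert(PSR_D_BIT == (1u << 9));
        return 0;
}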
