Changes representative of linux-4.18.0-305.19.1.el8_4.tar.xz
da-x committed Sep 7, 2021
1 parent 86e1fe5 commit c1b2845
Showing 54 changed files with 756 additions and 411 deletions.
2 changes: 1 addition & 1 deletion Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 4
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 305.17.1
RHEL_RELEASE = 305.19.1

#
# Early y+1 numbering
7 changes: 3 additions & 4 deletions arch/arm64/include/asm/asm-uaccess.h
@@ -78,10 +78,9 @@ alternative_else_nop_endif
/*
* Remove the address tag from a virtual address, if present.
*/
.macro clear_address_tag, dst, addr
tst \addr, #(1 << 55)
bic \dst, \addr, #(0xff << 56)
csel \dst, \dst, \addr, eq
.macro untagged_addr, dst, addr
sbfx \dst, \addr, #0, #56
and \dst, \dst, \addr
.endm

#endif
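
The untagged_addr macro that replaces clear_address_tag drops the flag-setting test and conditional select: sbfx sign-extends bit 55 over the top byte, and the following and clears the tag only when bit 55 is zero (a user address), leaving kernel addresses untouched. A minimal userspace C sketch of the same bit trick (the untag() helper and the example addresses are illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Mirror "sbfx dst, addr, #0, #56" followed by "and dst, dst, addr". */
static uint64_t untag(uint64_t addr)
{
	uint64_t ext = (uint64_t)((int64_t)(addr << 8) >> 8);	/* sign-extend bit 55 */
	return addr & ext;			/* clears bits 63:56 only if bit 55 == 0 */
}

int main(void)
{
	/* Tagged user pointer (tag 0x2a in the top byte, bit 55 clear): tag is stripped. */
	assert(untag(0x2a00ffff12345678ULL) == 0x0000ffff12345678ULL);
	/* Kernel pointer (bit 55 set): the sign extension is all ones, nothing changes. */
	assert(untag(0xffff000012345678ULL) == 0xffff000012345678ULL);
	return 0;
}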
2 changes: 2 additions & 0 deletions arch/arm64/include/asm/exception.h
@@ -41,4 +41,6 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}

asmlinkage void enter_from_user_mode(void);

#endif /* __ASM_EXCEPTION_H */
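
This declaration is the C hook that the new ct_user_exit_irqoff macro in entry.S branches to before interrupts are unmasked. Its definition is in one of the changed files not shown in this excerpt; as a rough sketch, assuming the backport mirrors the upstream shape (a notrace helper, typically in arch/arm64/kernel/traps.c), it would look roughly like:

#include <linux/context_tracking.h>
#include <linux/kprobes.h>
#include <linux/linkage.h>

asmlinkage void notrace enter_from_user_mode(void)
{
	/* On the way in from EL0, context tracking should still report "user". */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	/* Tell the context tracking / RCU machinery we left user mode, with IRQs off. */
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);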
26 changes: 18 additions & 8 deletions arch/arm64/include/asm/memory.h
@@ -228,21 +228,31 @@ extern u64 vabits_user;
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) \
((__typeof__(addr))sign_extend64((u64)(addr), 55))
#define __untagged_addr(addr) \
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr) ({ \
u64 __addr = (__force u64)(addr); \
__addr &= __untagged_addr(__addr); \
(__force __typeof__(addr))__addr; \
})

#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_set(addr, tag) (__typeof__(addr))( \
((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_reset(addr) __untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
#define __tag_set(addr, tag) (addr)
#define __tag_shifted(tag) 0UL
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif

static inline const void *__tag_set(const void *addr, u8 tag)
{
u64 __addr = (u64)addr & ~__tag_shifted(0xff);
return (const void *)(__addr | __tag_shifted(tag));
}

/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
@@ -329,8 +339,8 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
unsigned long __addr_tag = \
__tag_set(__addr, page_kasan_tag(page)); \
const void *__addr_tag = \
__tag_set((void *)__addr, page_kasan_tag(page)); \
((void *)__addr_tag); \
})
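
With this change untagged_addr() strips the top byte only from user pointers (bit 55 clear) before they reach checks such as access_ok()/__range_ok(), while a pointer that is already a kernel address keeps its KASAN tag; the plain sign extension survives as __untagged_addr(), which __tag_reset() now uses. A standalone C sketch of the behaviour these helpers aim for (helper names, constants and example addresses are illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define TAG_SHIFT	56

/* __untagged_addr(): plain sign extension from bit 55, as used by __tag_reset(). */
static uint64_t sign_extend55(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

/* untagged_addr(): strip the tag only if bit 55 is clear, i.e. only for user pointers. */
static uint64_t untagged_addr(uint64_t addr)
{
	return addr & sign_extend55(addr);
}

/* __tag_set() / __tag_get(): replace or read the top-byte tag. */
static uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << TAG_SHIFT)) | ((uint64_t)tag << TAG_SHIFT);
}

static uint8_t tag_get(uint64_t addr)
{
	return (uint8_t)(addr >> TAG_SHIFT);
}

int main(void)
{
	uint64_t user = tag_set(0x0000aaaabbbbccccULL, 0x5a);
	uint64_t kern = 0xf2ff800010000000ULL;

	assert(tag_get(user) == 0x5a);
	assert(untagged_addr(user) == 0x0000aaaabbbbccccULL);

	/* A KASAN-tagged kernel pointer is left alone by untagged_addr()... */
	assert(untagged_addr(kern) == kern);
	/* ...while the plain sign extension still resets its tag to 0xff. */
	assert(sign_extend55(kern) == 0xffff800010000000ULL);
	return 0;
}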

10 changes: 7 additions & 3 deletions arch/arm64/include/asm/uaccess.h
@@ -75,6 +75,8 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
{
unsigned long ret, limit = current_thread_info()->addr_limit;

addr = untagged_addr(addr);

__chk_user_ptr(addr);
asm volatile(
// A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -228,18 +230,20 @@ static inline void uaccess_enable_not_uao(void)

/*
* Sanitise a uaccess pointer such that it becomes NULL if above the
* current addr_limit.
* current addr_limit. In case the pointer is tagged (has the top byte set),
* untag the pointer before checking.
*/
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
void __user *safe_ptr;

asm volatile(
" bics xzr, %1, %2\n"
" bics xzr, %3, %2\n"
" csel %0, %1, xzr, eq\n"
: "=&r" (safe_ptr)
: "r" (ptr), "r" (current_thread_info()->addr_limit)
: "r" (ptr), "r" (current_thread_info()->addr_limit),
"r" (untagged_addr(ptr))
: "cc");

csdb();
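
Note that only the comparison operand (%3) is the untagged address; the value csel copies into safe_ptr is the original, possibly tagged pointer, so callers keep the tag. Ignoring the speculation hardening that the bics/csel/csdb sequence provides, a rough C sketch of the check follows; it leans on the assumption that addr_limit is always an all-ones mask (USER_DS or KERNEL_DS), so "addr & ~limit" is zero exactly when addr <= limit:

/* Sketch only: the real implementation is the asm sequence above. */
static inline void __user *uaccess_mask_ptr_sketch(const void __user *ptr)
{
	unsigned long limit = current_thread_info()->addr_limit;

	/* Untag only for the range check; a set bit above the limit means "out of range". */
	if ((unsigned long)untagged_addr(ptr) & ~limit)
		return NULL;			/* above addr_limit: poison the pointer */

	return (void __user *)ptr;		/* in range: return the original, tagged pointer */
}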
85 changes: 46 additions & 39 deletions arch/arm64/kernel/entry.S
@@ -41,19 +41,9 @@
* Context tracking subsystem. Used to instrument transitions
* between user and kernel mode.
*/
.macro ct_user_exit, syscall = 0
.macro ct_user_exit_irqoff
#ifdef CONFIG_CONTEXT_TRACKING
bl context_tracking_user_exit
.if \syscall == 1
/*
* Save/restore needed during syscalls. Restore syscall arguments from
* the values already saved on stack during kernel_entry.
*/
ldp x0, x1, [sp]
ldp x2, x3, [sp, #S_X2]
ldp x4, x5, [sp, #S_X4]
ldp x6, x7, [sp, #S_X6]
.endif
bl enter_from_user_mode
#endif
.endm

@@ -609,10 +599,8 @@ el1_sync:
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -629,14 +617,16 @@ el1_da:
*/
mrs x3, far_el1
inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
untagged_addr x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort

kernel_exit 1
el1_sp_pc:
el1_pc:
/*
* Stack or PC alignment exception handling
* PC alignment exception handling. We don't handle SP alignment faults,
* since we will have hit a recursive exception when trying to push the
* initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
@@ -754,9 +744,9 @@ el0_sync:
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -780,13 +770,13 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
b.eq el0_undef
b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
b.eq el0_undef
b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
@@ -810,16 +800,27 @@ el0_irq_compat:
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked

el0_cp15:
/*
* Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
*/
ct_user_exit_irqoff
enable_daif
mov x0, x25
mov x1, sp
bl do_cp15instr
b ret_to_user
#endif

el0_da:
/*
* Data abort handling
*/
mrs x26, far_el1
ct_user_exit_irqoff
enable_daif
ct_user_exit
clear_address_tag x0, x26
untagged_addr x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
@@ -830,11 +831,11 @@ el0_ia:
*/
mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -844,8 +845,8 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
@@ -854,8 +855,8 @@ el0_sve_acc:
/*
* Scalable Vector Extension access
*/
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_sve_acc
@@ -864,23 +865,27 @@ el0_fpsimd_exc:
/*
* Floating Point, Advanced SIMD or SVE exception
*/
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
el0_sp:
ldr x26, [sp, #S_SP]
b el0_sp_pc
el0_pc:
mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
ct_user_exit_irqoff
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -890,17 +895,17 @@ el0_undef:
/*
* Undefined instruction
*/
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, sp
bl do_undefinstr
b ret_to_user
el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_sysinstr
@@ -910,17 +915,18 @@ el0_dbg:
* Debug exception handling
*/
tbnz x24, #0, el0_inv // EL0 only
mrs x24, far_el1
gic_prio_kentry_setup tmp=x3
mrs x0, far_el1
ct_user_exit_irqoff
mov x0, x24
mov x1, x25
mov x2, sp
bl do_debug_exception
enable_da_f
ct_user_exit
b ret_to_user
el0_inv:
ct_user_exit_irqoff
enable_daif
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
@@ -933,13 +939,13 @@ el0_irq:
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
ct_user_exit_irqoff
enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif

ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
tbz x22, #55, 1f
bl do_el0_irq_bp_hardening
@@ -966,13 +972,14 @@ ENDPROC(el1_error)
el0_error:
kernel_entry 0
el0_error_naked:
mrs x1, esr_el1
mrs x25, esr_el1
gic_prio_kentry_setup tmp=x2
ct_user_exit_irqoff
enable_dbg
mov x0, sp
mov x1, x25
bl do_serror
enable_da_f
ct_user_exit
b ret_to_user
ENDPROC(el0_error)
