Skip to content

Commit

Permalink
Changes representative of linux-3.10.0-1160.80.1.el7.tar.xz
Browse files Browse the repository at this point in the history
  • Loading branch information
da-x committed Oct 8, 2022
1 parent 3296256 commit bd9b51a
Show file tree
Hide file tree
Showing 161 changed files with 2,256 additions and 819 deletions.
36 changes: 35 additions & 1 deletion Documentation/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3317,6 +3317,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

retain_initrd [RAM] Keep initrd memory after extraction

retbleed= [X86] Control mitigation of RETBleed (Arbitrary
Speculative Code Execution with Return Instructions)
vulnerability.

off - no mitigation
auto - automatically select a mitigation
auto,nosmt - automatically select a mitigation,
disabling SMT if necessary for
the full mitigation (only on Zen1
and older without STIBP).
ibpb - mitigate short speculation windows on
basic block boundaries too. Safe, highest
perf impact.
unret - force enable untrained return thunks,
only effective on AMD f15h-f17h
based systems.
unret,nosmt - like unret, will disable SMT when STIBP
is not available.

Selecting 'auto' will choose a mitigation method at run
time according to the CPU.

Not specifying this option is equivalent to retbleed=auto.

rhash_entries= [KNL,NET]
Set number of hash buckets for route cache

Expand Down Expand Up @@ -3504,13 +3528,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

Specific mitigations can also be selected manually:

retpoline - replace indirect branches
retpoline[,force] - replace indirect branches
ibrs - Intel: Indirect Branch Restricted Speculation (kernel)
ibrs_always - Intel: Indirect Branch Restricted Speculation (kernel and user space)

Not specifying this option is equivalent to
spectre_v2=auto.

NOTE: with the advent of RETBleed, IBRS becomes the
de facto speculation control protection mechanism for
Intel CPUs affected by it. Specifying 'retpoline' as
spectre_v2 mitigation will now become equivalent to
spectre_v2=auto for such CPUs.

adding ',force' to spectre_v2=retpoline will make it
keep the old behavior, which means the system will
remain VULNERABLE to RETBleed.

spec_store_bypass_disable=
[HW] Control Speculative Store Bypass (SSB) Disable mitigation
(Speculative Store Bypass vulnerability)
Expand Down
7 changes: 7 additions & 0 deletions Documentation/spec_ctrl.txt
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,13 @@ retp_enabled 1: Retpolines are enabled in the kernel, which replace all
which adds user-to-user and guest-to-guest protection
across context switches.

With the advent of RETBleed, IBRS becomes the de facto
speculation control protection mechanism for Intel CPUs
affected by it, and retp_enabled=1 will fall back to
enabling IBRS instead of Retpolines on those CPUs.

retp_enabled 2: Retpolines are force-enabled on Intel RETBleed affected CPUs.

ibrs_enabled 0: Disabled

ibrs_enabled 1: IBRS enabled in kernel mode.
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ EXTRAVERSION =
NAME = Unicycling Gorilla
RHEL_MAJOR = 7
RHEL_MINOR = 9
RHEL_RELEASE = 1160.76.1
RHEL_RELEASE = 1160.80.1

#
# DRM backport version
Expand Down
5 changes: 4 additions & 1 deletion arch/x86/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -159,9 +159,12 @@ KBUILD_AFLAGS += $(mflags-y)

# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register -mfunction-return=thunk-extern)
RETPOLINE_VDSO_CFLAGS += $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
ifneq ($(RETPOLINE_CFLAGS),)
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
else
$(error CONFIG_RETPOLINE=y, but not supported by the compiler. Compiler update recommended.)
endif
Expand Down
2 changes: 2 additions & 0 deletions arch/x86/boot/compressed/mem_encrypt.S
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ ENTRY(get_sev_encryption_bit)
#endif /* CONFIG_AMD_MEM_ENCRYPT */

ret
int3
ENDPROC(get_sev_encryption_bit)

.code64
Expand Down Expand Up @@ -96,4 +97,5 @@ ENTRY(get_sev_encryption_mask)
#endif

ret
int3
ENDPROC(get_sev_encryption_mask)
4 changes: 2 additions & 2 deletions arch/x86/crypto/aes-i586-asm_32.S
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ ENTRY(aes_enc_blk)
pop %ebx
mov %r0,(%ebp)
pop %ebp
ret
RET
ENDPROC(aes_enc_blk)

// AES (Rijndael) Decryption Subroutine
Expand Down Expand Up @@ -358,5 +358,5 @@ ENTRY(aes_dec_blk)
pop %ebx
mov %r0,(%ebp)
pop %ebp
ret
RET
ENDPROC(aes_dec_blk)
2 changes: 1 addition & 1 deletion arch/x86/crypto/aes-x86_64-asm_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@
movl r6 ## E,4(r9); \
movl r7 ## E,8(r9); \
movl r8 ## E,12(r9); \
ret; \
RET; \
ENDPROC(FUNC);

#define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
Expand Down
2 changes: 1 addition & 1 deletion arch/x86/crypto/aes_ctrby8_avx-x86_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -537,7 +537,7 @@ ddq_add_8:
/* return updated IV */
vpshufb xbyteswap, xcounter, xcounter
vmovdqu xcounter, (p_iv)
ret
RET
.endm

/*
Expand Down
42 changes: 21 additions & 21 deletions arch/x86/crypto/aesni-intel_asm.S
Original file line number Diff line number Diff line change
Expand Up @@ -1566,7 +1566,7 @@ _return_T_done_decrypt:
pop %r14
pop %r13
pop %r12
ret
RET
ENDPROC(aesni_gcm_dec)


Expand Down Expand Up @@ -1847,7 +1847,7 @@ _return_T_done_encrypt:
pop %r14
pop %r13
pop %r12
ret
RET
ENDPROC(aesni_gcm_enc)

#endif
Expand All @@ -1864,7 +1864,7 @@ _key_expansion_256a:
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
ENDPROC(_key_expansion_128)
ENDPROC(_key_expansion_256a)

Expand All @@ -1890,7 +1890,7 @@ _key_expansion_192a:
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
ret
RET
ENDPROC(_key_expansion_192a)

.align 4
Expand All @@ -1910,7 +1910,7 @@ _key_expansion_192b:

movaps %xmm0, (TKEYP)
add $0x10, TKEYP
ret
RET
ENDPROC(_key_expansion_192b)

.align 4
Expand All @@ -1923,7 +1923,7 @@ _key_expansion_256b:
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
ret
RET
ENDPROC(_key_expansion_256b)

/*
Expand Down Expand Up @@ -2038,7 +2038,7 @@ ENTRY(aesni_set_key)
popl KEYP
#endif
FRAME_END
ret
RET
ENDPROC(aesni_set_key)

/*
Expand All @@ -2062,7 +2062,7 @@ ENTRY(aesni_enc)
popl KEYP
#endif
FRAME_END
ret
RET
ENDPROC(aesni_enc)

/*
Expand Down Expand Up @@ -2120,7 +2120,7 @@ _aesni_enc1:
AESENC KEY STATE
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
ret
RET
ENDPROC(_aesni_enc1)

/*
Expand Down Expand Up @@ -2229,7 +2229,7 @@ _aesni_enc4:
AESENCLAST KEY STATE2
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
ret
RET
ENDPROC(_aesni_enc4)

/*
Expand All @@ -2254,7 +2254,7 @@ ENTRY(aesni_dec)
popl KEYP
#endif
FRAME_END
ret
RET
ENDPROC(aesni_dec)

/*
Expand Down Expand Up @@ -2312,7 +2312,7 @@ _aesni_dec1:
AESDEC KEY STATE
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
ret
RET
ENDPROC(_aesni_dec1)

/*
Expand Down Expand Up @@ -2421,7 +2421,7 @@ _aesni_dec4:
AESDECLAST KEY STATE2
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
ret
RET
ENDPROC(_aesni_dec4)

/*
Expand Down Expand Up @@ -2481,7 +2481,7 @@ ENTRY(aesni_ecb_enc)
popl LEN
#endif
FRAME_END
ret
RET
ENDPROC(aesni_ecb_enc)

/*
Expand Down Expand Up @@ -2542,7 +2542,7 @@ ENTRY(aesni_ecb_dec)
popl LEN
#endif
FRAME_END
ret
RET
ENDPROC(aesni_ecb_dec)

/*
Expand Down Expand Up @@ -2586,7 +2586,7 @@ ENTRY(aesni_cbc_enc)
popl IVP
#endif
FRAME_END
ret
RET
ENDPROC(aesni_cbc_enc)

/*
Expand Down Expand Up @@ -2679,7 +2679,7 @@ ENTRY(aesni_cbc_dec)
popl IVP
#endif
FRAME_END
ret
RET
ENDPROC(aesni_cbc_dec)

#ifdef __x86_64__
Expand Down Expand Up @@ -2708,7 +2708,7 @@ _aesni_inc_init:
mov $1, TCTR_LOW
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
ret
RET
ENDPROC(_aesni_inc_init)

/*
Expand Down Expand Up @@ -2737,7 +2737,7 @@ _aesni_inc:
.Linc_low:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
ret
RET
ENDPROC(_aesni_inc)

/*
Expand Down Expand Up @@ -2800,7 +2800,7 @@ ENTRY(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
FRAME_END
ret
RET
ENDPROC(aesni_ctr_enc)

/*
Expand Down Expand Up @@ -2928,7 +2928,7 @@ ENTRY(aesni_xts_crypt8)
movdqu STATE4, 0x70(OUTP)

FRAME_END
ret
RET
ENDPROC(aesni_xts_crypt8)

#endif
12 changes: 6 additions & 6 deletions arch/x86/crypto/aesni-intel_avx-x86_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -1557,7 +1557,7 @@ ENTRY(aesni_gcm_precomp_avx_gen2)
pop %r14
pop %r13
pop %r12
ret
RET
ENDPROC(aesni_gcm_precomp_avx_gen2)

###############################################################################
Expand All @@ -1578,7 +1578,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen2)
###############################################################################
ENTRY(aesni_gcm_enc_avx_gen2)
GCM_ENC_DEC_AVX ENC
ret
RET
ENDPROC(aesni_gcm_enc_avx_gen2)

###############################################################################
Expand All @@ -1599,7 +1599,7 @@ ENDPROC(aesni_gcm_enc_avx_gen2)
###############################################################################
ENTRY(aesni_gcm_dec_avx_gen2)
GCM_ENC_DEC_AVX DEC
ret
RET
ENDPROC(aesni_gcm_dec_avx_gen2)
#endif /* CONFIG_AS_AVX */

Expand Down Expand Up @@ -2881,7 +2881,7 @@ ENTRY(aesni_gcm_precomp_avx_gen4)
pop %r14
pop %r13
pop %r12
ret
RET
ENDPROC(aesni_gcm_precomp_avx_gen4)


Expand All @@ -2903,7 +2903,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen4)
###############################################################################
ENTRY(aesni_gcm_enc_avx_gen4)
GCM_ENC_DEC_AVX2 ENC
ret
RET
ENDPROC(aesni_gcm_enc_avx_gen4)

###############################################################################
Expand All @@ -2924,7 +2924,7 @@ ENDPROC(aesni_gcm_enc_avx_gen4)
###############################################################################
ENTRY(aesni_gcm_dec_avx_gen4)
GCM_ENC_DEC_AVX2 DEC
ret
RET
ENDPROC(aesni_gcm_dec_avx_gen4)

#endif /* CONFIG_AS_AVX2 */
Loading

0 comments on commit bd9b51a

Please sign in to comment.