Changes representative of linux-3.10.0-1062.1.1.el7.tar.xz
da-x committed Aug 13, 2019
1 parent 1f278f2 commit 6e20ff7
Showing 21 changed files with 1,098 additions and 140 deletions.
1 change: 1 addition & 0 deletions Documentation/admin-guide/hw-vuln/index.rst
@@ -9,5 +9,6 @@ are configurable at compile, boot or run time.
 .. toctree::
    :maxdepth: 1

+   spectre
    l1tf
    mds
769 changes: 769 additions & 0 deletions Documentation/admin-guide/hw-vuln/spectre.rst

Large diffs are not rendered by default.

6 changes: 5 additions & 1 deletion Documentation/kernel-parameters.txt
@@ -2035,7 +2035,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
            improves system performance, but it may also
            expose users to several CPU vulnerabilities.
            Equivalent to: nopti [X86,PPC]
-                          nospectre_v1 [PPC]
+                          nospectre_v1 [X86,PPC]
                           nobp=0 [S390]
                           nospectre_v2 [X86,PPC,S390]
                           spec_store_bypass_disable=off [X86,PPC]
@@ -2367,6 +2367,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
            nosmt=force: Force disable SMT, cannot be undone
                         via the sysfs control file.

+   nospectre_v1    [X86,PPC] Disable mitigations for Spectre Variant 1
+           (bounds check bypass). With this option, data leaks are
+           possible in the system.
+
    nospectre_v2    [X86] Disable all mitigations for the Spectre variant 2
            (indirect branch prediction) vulnerability. System may
            allow data leaks with this option, which is equivalent
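The practical effect of nospectre_v1 can be checked after boot through the sysfs vulnerabilities directory, which this commit wires up to the new spectre_v1_strings[] (see arch/x86/kernel/cpu/bugs.c below). A minimal user-space sketch, assuming the standard sysfs path:

#include <stdio.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(line, sizeof(line), f))
        printf("spectre_v1: %s", line);  /* e.g. "Mitigation: Load fences, ..." */
    fclose(f);
    return 0;
}

With nospectre_v1 on the command line, the file should report the "Vulnerable: ..." string defined later in bugs.c; otherwise the "Mitigation: ..." string.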
2 changes: 1 addition & 1 deletion Makefile
@@ -5,7 +5,7 @@ EXTRAVERSION =
 NAME = Unicycling Gorilla
 RHEL_MAJOR = 7
 RHEL_MINOR = 7
-RHEL_RELEASE = 1062
+RHEL_RELEASE = 1062.1.1

 #
 # DRM backport version
38 changes: 38 additions & 0 deletions arch/x86/include/asm/calling.h
@@ -47,6 +47,8 @@ For 32-bit we have the following conventions - kernel is built with
  */

 #include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/nops.h>

 /*
  * 64-bit system call stack frame layout defines and helpers,
@@ -221,3 +223,39 @@ For 32-bit we have the following conventions - kernel is built with
    .quad 417b
    .popsection
 .endm
+
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ *
+ * RHEL7 uses gs-indexed per-cpu variables for dynamic PTI enabling/disabling,
+ * so the on/off state of PTI doesn't matter in determining whether fencing
+ * should be used. We have to properly fence swapgs before the invocation of
+ * the PTI macros.
+ *
+ * RHEL7 also doesn't have the ALTERNATIVE asm macro, so we have to open-code
+ * it. The lfence instruction is 3 bytes long.
+ */
+.macro _FENCE_SWAPGS feature
+661:   ASM_NOP3; 662:
+.pushsection .altinstr_replacement, "ax"
+663:   lfence; 664:
+.popsection
+.pushsection .altinstructions, "a"
+   altinstruction_entry 661b, 663b, \feature, 662b-661b, 664b-663b
+.popsection
+.endm
+
+.macro FENCE_SWAPGS_USER_ENTRY
+   _FENCE_SWAPGS X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+   _FENCE_SWAPGS X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
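Why an lfence? It keeps younger loads from issuing until older instructions, including the conditional branch around swapgs, have resolved; that is the property the patched-in fence provides. A minimal user-space sketch of the same barrier pattern around a bounds check (illustrative names; _mm_lfence is the compiler intrinsic that emits lfence):

#include <stdio.h>
#include <emmintrin.h>          /* _mm_lfence() */

/*
 * Hypothetical bounds-checked read, hardened the way the kernel fences
 * conditional paths: the lfence keeps the load below from executing
 * speculatively with an out-of-bounds idx before the branch resolves.
 */
static unsigned char read_checked(const unsigned char *array, size_t size,
                                  size_t idx)
{
    unsigned char v = 0;

    if (idx < size) {
        _mm_lfence();           /* speculation barrier on the taken path */
        v = array[idx];
    }
    return v;
}

int main(void)
{
    unsigned char data[16] = { 42 };

    printf("%u\n", read_checked(data, sizeof(data), 0));
    return 0;
}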
1 change: 0 additions & 1 deletion arch/x86/include/asm/cpufeature.h
@@ -5,7 +5,6 @@
 #define _ASM_X86_CPUFEATURE_H

 #include <asm/processor.h>
-#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */

 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)

4 changes: 4 additions & 0 deletions arch/x86/include/asm/cpufeatures.h
@@ -192,15 +192,18 @@
  */

 #define X86_FEATURE_RING3MWAIT          ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
+#define X86_FEATURE_FENCE_SWAPGS_USER   ( 7*32+ 1) /* "" LFENCE in user entry SWAPGS path */
 #define X86_FEATURE_CPB                 ( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB                 ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 #define X86_FEATURE_CAT_L3              ( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2              ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3              ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE      ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 #define X86_FEATURE_HW_PSTATE           ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK       ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME                 ( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_MSR_SPEC_CTRL       ( 7*32+11) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL ( 7*32+12) /* "" LFENCE in kernel entry SWAPGS path */
 #define X86_FEATURE_RETPOLINE_AMD       ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN          ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT            ( 7*32+15) /* Intel Processor Trace */
@@ -358,5 +361,6 @@
 #define X86_BUG_L1TF                    X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS                     X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY              X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
+#define X86_BUG_SWAPGS                  X86_BUG(21) /* CPU is affected by speculation through SWAPGS */

 #endif /* _ASM_X86_CPUFEATURES_H */
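The ( 7*32+ n) arithmetic places each feature in 32-bit "cpufeature words"; word 7 is a Linux-defined scratch word rather than a CPUID leaf, and X86_BUG(n) bits live in separate bug words after the feature words. A small sketch of the word/bit encoding (user-space illustration only):

#include <stdio.h>

#define X86_FEATURE_FENCE_SWAPGS_USER   (7*32 +  1)
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (7*32 + 12)

int main(void)
{
    unsigned int f[] = { X86_FEATURE_FENCE_SWAPGS_USER,
                         X86_FEATURE_FENCE_SWAPGS_KERNEL };

    /* prints word 7, bit 1 and word 7, bit 12 */
    for (unsigned int i = 0; i < sizeof(f) / sizeof(f[0]); i++)
        printf("feature %3u -> cpufeature word %u, bit %2u\n",
               f[i], f[i] / 32, f[i] % 32);
    return 0;
}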
99 changes: 94 additions & 5 deletions arch/x86/kernel/cpu/bugs.c
@@ -30,6 +30,7 @@
 #include <linux/sched/smt.h>


+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_parse_cmdline(void);
 void ssb_select_mitigation(void);
@@ -70,15 +71,15 @@ void __init check_bugs(void)
     * SPEC_CTRL MSR value is properly set up.
     */
    ssb_parse_cmdline();
-   ssb_select_mitigation();

    spec_ctrl_init();
-   spectre_v2_select_mitigation();

+   /* Select the proper CPU mitigations before patching alternatives */
+   spectre_v1_select_mitigation();
+   spectre_v2_select_mitigation();
    spec_ctrl_cpu_init();

+   ssb_select_mitigation();
    l1tf_select_mitigation();

    mds_select_mitigation();

    arch_smt_update();
@@ -215,6 +216,94 @@ static int __init mds_cmdline(char *str)
 }
 early_param("mds", mds_cmdline);

+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+   SPECTRE_V1_MITIGATION_NONE,
+   SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __read_mostly =
+   SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+   [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: Load fences, __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+   [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: Load fences, usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+   if (!boot_cpu_has(X86_FEATURE_SMAP))
+       return false;
+
+   /*
+    * On CPUs which are vulnerable to Meltdown, SMAP does not
+    * prevent speculative access to user data in the L1 cache.
+    * Consider SMAP to be non-functional as a mitigation on these
+    * CPUs.
+    */
+   if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+       return false;
+
+   return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+   if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+       spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+       return;
+   }
+
+   if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+       /*
+        * With Spectre v1, a user can speculatively control either
+        * path of a conditional swapgs with a user-controlled GS
+        * value. The mitigation is to add lfences to both code paths.
+        *
+        * If FSGSBASE is enabled, the user can put a kernel address in
+        * GS, in which case SMAP provides no protection.
+        *
+        * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+        *   FSGSBASE enablement patches have been merged. ]
+        *
+        * If FSGSBASE is disabled, the user can only put a user space
+        * address in GS. That makes an attack harder, but still
+        * possible if there's no SMAP protection.
+        */
+       if (!smap_works_speculatively()) {
+           /*
+            * Mitigation can be provided from SWAPGS itself if
+            * it is serializing. If not, mitigate with an LFENCE
+            * to stop speculation through swapgs.
+            */
+           if (boot_cpu_has_bug(X86_BUG_SWAPGS))
+               setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+           /*
+            * Enable lfences in the kernel entry (non-swapgs)
+            * paths, to prevent user entry from speculatively
+            * skipping swapgs.
+            */
+           setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+       }
+   }
+
+   pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+   spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+   return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt) "Spectre V2 : " fmt

@@ -932,7 +1021,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        break;

    case X86_BUG_SPECTRE_V1:
-       return sprintf(buf, "Mitigation: Load fences, __user pointer sanitization\n");
+       return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

    case X86_BUG_SPECTRE_V2:
        return sprintf(buf, "%s%s%s\n",
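smap_works_speculatively() above keys off boot_cpu_has(X86_FEATURE_SMAP), which ultimately comes from CPUID leaf 7. A user-space sketch of the same capability check, assuming gcc/clang's <cpuid.h> (SMAP is bit 20 of EBX in leaf 7, subleaf 0):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 7 not supported");
        return 1;
    }
    /* SMAP: Supervisor Mode Access Prevention, leaf 7 EBX bit 20 */
    printf("SMAP: %s\n", (ebx & (1u << 20)) ? "yes" : "no");
    return 0;
}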
42 changes: 27 additions & 15 deletions arch/x86/kernel/cpu/common.c
@@ -901,6 +901,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_L1TF     BIT(3)
 #define NO_MDS      BIT(4)
 #define MSBDS_ONLY  BIT(5)
+#define NO_SWAPGS   BIT(6)

 #define VULNWL(_vendor, _family, _model, _whitelist)   \
    { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -927,29 +928,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
    VULNWL_INTEL(ATOM_BONNELL,          NO_SPECULATION),
    VULNWL_INTEL(ATOM_BONNELL_MID,      NO_SPECULATION),

-   VULNWL_INTEL(ATOM_SILVERMONT,       NO_SSB | NO_L1TF | MSBDS_ONLY),
-   VULNWL_INTEL(ATOM_SILVERMONT_X,     NO_SSB | NO_L1TF | MSBDS_ONLY),
-   VULNWL_INTEL(ATOM_SILVERMONT_MID,   NO_SSB | NO_L1TF | MSBDS_ONLY),
-   VULNWL_INTEL(ATOM_AIRMONT,          NO_SSB | NO_L1TF | MSBDS_ONLY),
-   VULNWL_INTEL(XEON_PHI_KNL,          NO_SSB | NO_L1TF | MSBDS_ONLY),
-   VULNWL_INTEL(XEON_PHI_KNM,          NO_SSB | NO_L1TF | MSBDS_ONLY),
+   VULNWL_INTEL(ATOM_SILVERMONT,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+   VULNWL_INTEL(ATOM_SILVERMONT_X,     NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+   VULNWL_INTEL(ATOM_SILVERMONT_MID,   NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+   VULNWL_INTEL(ATOM_AIRMONT,          NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+   VULNWL_INTEL(XEON_PHI_KNL,          NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+   VULNWL_INTEL(XEON_PHI_KNM,          NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),

    VULNWL_INTEL(CORE_YONAH,            NO_SSB),

-   VULNWL_INTEL(ATOM_AIRMONT_MID,      NO_L1TF | MSBDS_ONLY),
+   VULNWL_INTEL(ATOM_AIRMONT_MID,      NO_L1TF | MSBDS_ONLY | NO_SWAPGS),

-   VULNWL_INTEL(ATOM_GOLDMONT,         NO_MDS | NO_L1TF),
-   VULNWL_INTEL(ATOM_GOLDMONT_X,       NO_MDS | NO_L1TF),
-   VULNWL_INTEL(ATOM_GOLDMONT_PLUS,    NO_MDS | NO_L1TF),
+   VULNWL_INTEL(ATOM_GOLDMONT,         NO_MDS | NO_L1TF | NO_SWAPGS),
+   VULNWL_INTEL(ATOM_GOLDMONT_X,       NO_MDS | NO_L1TF | NO_SWAPGS),
+   VULNWL_INTEL(ATOM_GOLDMONT_PLUS,    NO_MDS | NO_L1TF | NO_SWAPGS),
+
+   /*
+    * Technically, swapgs isn't serializing on AMD (despite it previously
+    * being documented as such in the APM). But according to AMD, %gs is
+    * updated non-speculatively, and the issuing of %gs-relative memory
+    * operands will be blocked until the %gs update completes, which is
+    * good enough for our purposes.
+    */

    /* AMD Family 0xf - 0x12 */
-   VULNWL_AMD(0x0f,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-   VULNWL_AMD(0x10,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-   VULNWL_AMD(0x11,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
-   VULNWL_AMD(0x12,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+   VULNWL_AMD(0x0f,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+   VULNWL_AMD(0x10,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+   VULNWL_AMD(0x11,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+   VULNWL_AMD(0x12,    NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),

    /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-   VULNWL_AMD(X86_FAMILY_ANY,  NO_MELTDOWN | NO_L1TF | NO_MDS),
+   VULNWL_AMD(X86_FAMILY_ANY,  NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
    {}
 };

@@ -986,6 +995,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
    }

+   if (!cpu_matches(NO_SWAPGS))
+       setup_force_cpu_bug(X86_BUG_SWAPGS);
+
    if (cpu_matches(NO_MELTDOWN))
        return;

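cpu_matches() itself is outside this hunk; upstream it is a thin wrapper that runs x86_match_cpu() over cpu_vuln_whitelist and tests the driver_data flags. A self-contained toy version of that matching logic (family/model values are illustrative, not the kernel's table):

#include <stdbool.h>
#include <stdio.h>

/* bit values mirror the NO_* defines in this file */
#define NO_MDS    (1UL << 4)
#define NO_SWAPGS (1UL << 6)

struct vuln_entry {
    unsigned char family, model;
    unsigned long flags;
};

/* toy whitelist in the shape of cpu_vuln_whitelist */
static const struct vuln_entry whitelist[] = {
    { 6, 0x5c, NO_MDS | NO_SWAPGS },    /* e.g. an Atom Goldmont */
    { 0, 0, 0 }
};

static bool cpu_matches(unsigned char family, unsigned char model,
                        unsigned long which)
{
    for (const struct vuln_entry *e = whitelist; e->family; e++)
        if (e->family == family && e->model == model)
            return (e->flags & which) != 0;
    return false;   /* not whitelisted: the bug bit gets forced */
}

int main(void)
{
    if (!cpu_matches(6, 0x3c, NO_SWAPGS))
        puts("family 6 model 0x3c: X86_BUG_SWAPGS would be set");
    return 0;
}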
14 changes: 14 additions & 0 deletions arch/x86/kernel/entry_64.S
@@ -265,12 +265,16 @@ ENDPROC(native_usergs_sysret64)
    testl $3, CS-RBP(%rsi)
    je 1f
    SWAPGS
+   FENCE_SWAPGS_USER_ENTRY
    SWITCH_TO_KERNEL_CR3
    IBRS_ENTRY_CLOBBER      /* no indirect jump allowed before IBRS */
    movq PER_CPU_VAR(kernel_stack), %rsp
    FILL_RETURN_BUFFER_CLOBBER  /* no ret allowed before stuffing the RSB */
    movq %rsi, %rsp
+   jmp 2f
 1:
+   FENCE_SWAPGS_KERNEL_ENTRY
+2:
    /* Check to see if we're on the trampoline stack. */
    movq PER_CPU_VAR(init_tss + TSS_sp0), %rcx
    cmpq %rcx, %rsp
@@ -355,8 +359,12 @@ ENTRY(save_paranoid)
    testl %edx,%edx
    js 1f           /* negative -> in kernel */
    SWAPGS
+   FENCE_SWAPGS_USER_ENTRY
    xorl %ebx,%ebx
+   jmp 2f
 1:
+   FENCE_SWAPGS_KERNEL_ENTRY
+2:
    SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
    IBRS_ENTRY_SAVE_AND_CLOBBER save_reg=%r13d /* no indirect jump allowed before IBRS */
    FILL_RETURN_BUFFER_CLOBBER  /* no ret allowed before stuffing the RSB */
@@ -1525,6 +1533,7 @@ ENTRY(error_entry)
    je error_kernelspace
 error_swapgs:
    SWAPGS
+   FENCE_SWAPGS_USER_ENTRY
    SWITCH_TO_KERNEL_CR3
    movq %rsp, %rsi
    movq PER_CPU_VAR(kernel_stack), %rsp
@@ -1572,6 +1581,7 @@ error_kernelspace:
    je bstep_iret
    cmpq $gs_change,RIP+8(%rsp)
    je error_swapgs
+   FENCE_SWAPGS_KERNEL_ENTRY
    jmp error_sti

 bstep_iret:
@@ -1685,6 +1695,7 @@ ENTRY(nmi)
     */

    SWAPGS_UNSAFE_STACK
+   FENCE_SWAPGS_USER_ENTRY
    SWITCH_TO_KERNEL_CR3
    cld
    movq %rsp, %rsi
@@ -1920,6 +1931,9 @@ end_repeat_nmi:
     * Even with normal interrupts enabled. An NMI should not be
     * setting NEED_RESCHED or anything that normal interrupts and
     * exceptions might do.
+    *
+    * save_paranoid provides the necessary swapgs fencing, so no
+    * explicit FENCE_SWAPGS_KERNEL_ENTRY is needed.
     */
    call save_paranoid
    DEFAULT_FRAME 0
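The fences are patched in via alternatives rather than hard-coded because of cost: on unaffected CPUs the 3-byte NOP is essentially free, while an lfence serializes every kernel entry. A rough user-space sketch for measuring that overhead (assuming x86-64 with gcc/clang intrinsics; absolute cycle counts will vary by CPU):

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>          /* __rdtsc(), _mm_lfence() */

int main(void)
{
    enum { N = 10 * 1000 * 1000 };
    uint64_t t0, t1;

    t0 = __rdtsc();
    for (volatile int i = 0; i < N; i++)
        ;                               /* baseline: empty loop */
    t1 = __rdtsc();
    printf("baseline: %.2f cycles/iter\n", (double)(t1 - t0) / N);

    t0 = __rdtsc();
    for (volatile int i = 0; i < N; i++)
        _mm_lfence();                   /* serialized on every iteration */
    t1 = __rdtsc();
    printf("lfence:   %.2f cycles/iter\n", (double)(t1 - t0) / N);
    return 0;
}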