author | Konstantin Belousov <kib@FreeBSD.org> | 2018-05-24 13:17:24 +0000 |
---|---|---|
committer | Konstantin Belousov <kib@FreeBSD.org> | 2018-05-24 13:17:24 +0000 |
commit | 84881e1818a0b0652f41ba66050d261ce2850ec5 | |
tree | 85516977675af359a66b2db1c2f832c18f8bbe55 /sys/x86/x86 | |
parent | ab24c68d69a2d87beb5e7f2f74dcf1bc0bc9d3a5 | |
MFC r334004:
Add Intel Spec Store Bypass Disable control.
This also includes the i386/include/pcpu.h part of r334018.
Security: CVE-2018-3639
Approved by: re (gjb)
Notes:
svn path=/stable/11/; revision=334152
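
For reference (not part of the commit): the change introduces two sysctl OIDs, hw.spec_store_bypass_disable (the read-write mode knob) and hw.spec_store_bypass_disable_active (read-only status), which can be queried from userland with sysctlbyname(3). Below is a minimal sketch; the OID names are taken from the SYSCTL declarations in the diff below, and error handling is kept deliberately terse.

```c
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int mode, active;
	size_t len;

	/* Read the configured mode: 0 = off, 1 = on, 2 = auto. */
	len = sizeof(mode);
	if (sysctlbyname("hw.spec_store_bypass_disable", &mode, &len,
	    NULL, 0) != 0) {
		perror("hw.spec_store_bypass_disable");
		return (1);
	}

	/* Read whether SSBD is actually active on the CPUs. */
	len = sizeof(active);
	if (sysctlbyname("hw.spec_store_bypass_disable_active", &active,
	    &len, NULL, 0) != 0) {
		perror("hw.spec_store_bypass_disable_active");
		return (1);
	}

	printf("mode=%d (0=off, 1=on, 2=auto), active=%d\n", mode, active);
	return (0);
}
```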
Diffstat (limited to 'sys/x86/x86')
-rw-r--r-- | sys/x86/x86/cpu_machdep.c | 108 |
1 file changed, 106 insertions, 2 deletions
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
index ba359ebba6c2..9663c989dec0 100644
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -158,6 +158,7 @@ void
 acpi_cpu_idle_mwait(uint32_t mwait_hint)
 {
     int *state;
+    uint64_t v;
 
     /*
      * A comment in Linux patch claims that 'CPUs run faster with
@@ -174,11 +175,24 @@ acpi_cpu_idle_mwait(uint32_t mwait_hint)
     KASSERT(atomic_load_int(state) == STATE_SLEEPING,
         ("cpu_mwait_cx: wrong monitorbuf state"));
     atomic_store_int(state, STATE_MWAIT);
-    handle_ibrs_exit();
+    if (PCPU_GET(ibpb_set) || hw_ssb_active) {
+        v = rdmsr(MSR_IA32_SPEC_CTRL);
+        wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
+            IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
+    } else {
+        v = 0;
+    }
     cpu_monitor(state, 0, 0);
     if (atomic_load_int(state) == STATE_MWAIT)
         cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
-    handle_ibrs_entry();
+
+    /*
+     * SSB cannot be disabled while we sleep, or rather, if it was
+     * disabled, the sysctl thread will bind to our cpu to tweak
+     * MSR.
+     */
+    if (v != 0)
+        wrmsr(MSR_IA32_SPEC_CTRL, v);
 
     /*
      * We should exit on any event that interrupts mwait, because
@@ -836,3 +850,93 @@ hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
     CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
     "Disable Indirect Branch Restricted Speculation");
+
+int hw_ssb_active;
+int hw_ssb_disable;
+
+SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
+    &hw_ssb_active, 0,
+    "Speculative Store Bypass Disable active");
+
+static void
+hw_ssb_set_one(bool enable)
+{
+    uint64_t v;
+
+    v = rdmsr(MSR_IA32_SPEC_CTRL);
+    if (enable)
+        v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
+    else
+        v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
+    wrmsr(MSR_IA32_SPEC_CTRL, v);
+}
+
+static void
+hw_ssb_set(bool enable, bool for_all_cpus)
+{
+    struct thread *td;
+    int bound_cpu, i, is_bound;
+
+    if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
+        hw_ssb_active = 0;
+        return;
+    }
+    hw_ssb_active = enable;
+    if (for_all_cpus) {
+        td = curthread;
+        thread_lock(td);
+        is_bound = sched_is_bound(td);
+        bound_cpu = td->td_oncpu;
+        CPU_FOREACH(i) {
+            sched_bind(td, i);
+            hw_ssb_set_one(enable);
+        }
+        if (is_bound)
+            sched_bind(td, bound_cpu);
+        else
+            sched_unbind(td);
+        thread_unlock(td);
+    } else {
+        hw_ssb_set_one(enable);
+    }
+}
+
+void
+hw_ssb_recalculate(bool all_cpus)
+{
+
+    switch (hw_ssb_disable) {
+    default:
+        hw_ssb_disable = 0;
+        /* FALLTHROUGH */
+    case 0: /* off */
+        hw_ssb_set(false, all_cpus);
+        break;
+    case 1: /* on */
+        hw_ssb_set(true, all_cpus);
+        break;
+    case 2: /* auto */
+        hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ?
+            false : true, all_cpus);
+        break;
+    }
+}
+
+static int
+hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
+{
+    int error, val;
+
+    val = hw_ssb_disable;
+    error = sysctl_handle_int(oidp, &val, 0, req);
+    if (error != 0 || req->newptr == NULL)
+        return (error);
+    hw_ssb_disable = val;
+    hw_ssb_recalculate(true);
+    return (0);
+}
+SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
+    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
+    hw_ssb_disable_handler, "I",
+    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto");
+
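
The new code references MSR and CPUID bit definitions that are not part of this diff; the authoritative spellings and values live in sys/x86/include/specialreg.h. As a rough reference only, assuming the Intel-documented layout of IA32_SPEC_CTRL, IA32_ARCH_CAPABILITIES, and CPUID.(EAX=7,ECX=0):EDX, the symbols used above correspond to bits like these:

```c
/*
 * Assumed values, shown for orientation only; consult
 * sys/x86/include/specialreg.h for the definitions actually used.
 */
#define MSR_IA32_SPEC_CTRL      0x048        /* speculation control MSR */
#define IA32_SPEC_CTRL_IBRS     0x00000001   /* bit 0: restrict indirect branch speculation */
#define IA32_SPEC_CTRL_STIBP    0x00000002   /* bit 1: single-thread indirect branch predictors */
#define IA32_SPEC_CTRL_SSBD     0x00000004   /* bit 2: speculative store bypass disable */

#define IA32_ARCH_CAP_SSBD_NO   0x00000010   /* IA32_ARCH_CAPABILITIES bit 4: CPU not affected by SSB */

#define CPUID_STDEXT3_SSBD      0x80000000   /* CPUID.7.0 EDX bit 31: SSBD supported */
```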