author    Mark Johnston <markj@FreeBSD.org>	2020-11-11 15:01:17 +0000
committer Mark Johnston <markj@FreeBSD.org>	2020-11-11 15:01:17 +0000
commit    6f5a960678dd5330f71100ae379410199e97c665 (patch)
tree      9248f12b1410bc8ebb58d7416074c6e00c972fe7 /sys/amd64/vmm
parent    a4848c103cc4bafe1fad5c902072e2ab5523543a (diff)
vmm: Make pmap_invalidate_ept() wait synchronously for guest exits
Currently EPT TLB invalidation is done by incrementing a generation
counter and issuing an IPI to all CPUs currently running vCPU threads.
The VMM inner loop caches the most recently observed generation on each
host CPU and invalidates TLB entries before executing the VM if the
cached generation number is not the most recent value.

pmap_invalidate_ept() issues IPIs to force each vCPU to stop executing
guest instructions and reload the generation number.  However, it does
not actually wait for vCPUs to exit, potentially creating a window
where guests may continue to reference stale TLB entries.

Fix the problem by bracketing guest execution with an SMR read section
which is entered before loading the invalidation generation.  Then,
pmap_invalidate_ept() increments the current write sequence before
loading pm_active and sending IPIs, and polls readers to ensure that
all vCPUs potentially operating with stale TLB entries have exited
before pmap_invalidate_ept() returns.

Also ensure that unsynchronized loads of the generation counter are
wrapped with atomic(9), and stop (inconsistently) updating the
invalidation counter and pm_active bitmask with acquire semantics.

Reviewed by:	grehan, kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D26910
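The writer side, pmap_invalidate_ept(), lives in sys/amd64/amd64/pmap.c
and is outside this diff (which is limited to sys/amd64/vmm).  A minimal
sketch of the protocol described above, assuming the smr(9) primitives
smr_advance() and smr_wait() and a hypothetical vmm_ipinum IPI vector;
this is an illustration, not the verbatim kernel function:

static void
pmap_invalidate_ept(pmap_t pmap)
{
	smr_seq_t goal;

	/* Publish the new generation; readers load it with atomic(9). */
	atomic_add_long(&pmap->pm_eptgen, 1);

	/* Advance the write sequence before pm_active is loaded. */
	goal = smr_advance(pmap->pm_eptsmr);

	/* Force vCPUs running this guest to exit and reload pm_eptgen. */
	ipi_selected(pmap->pm_active, vmm_ipinum);

	/*
	 * Poll readers: wait until every vCPU that may hold stale TLB
	 * entries has left its SMR read section.
	 */
	smr_wait(pmap->pm_eptsmr, goal);
}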
Notes:
    svn path=/head/; revision=367593
Diffstat (limited to 'sys/amd64/vmm')
 -rw-r--r--  sys/amd64/vmm/amd/svm.c   | 39
 -rw-r--r--  sys/amd64/vmm/intel/vmx.c |  5
 2 files changed, 27 insertions(+), 17 deletions(-)
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index f90ef6656fec..3b42424c9e75 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/smr.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -1800,15 +1801,17 @@ restore_host_tss(void)
}
static void
-check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
+svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
{
struct svm_vcpu *vcpustate;
struct vmcb_ctrl *ctrl;
long eptgen;
+ int cpu;
bool alloc_asid;
- KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
- "active on cpu %u", __func__, thiscpu));
+ cpu = curcpu;
+ CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+ smr_enter(pmap->pm_eptsmr);
vcpustate = svm_get_vcpu(sc, vcpuid);
ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
@@ -1849,10 +1852,10 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
*/
alloc_asid = false;
- eptgen = pmap->pm_eptgen;
+ eptgen = atomic_load_long(&pmap->pm_eptgen);
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
- if (vcpustate->asid.gen != asid[thiscpu].gen) {
+ if (vcpustate->asid.gen != asid[cpu].gen) {
alloc_asid = true; /* (c) and (d) */
} else if (vcpustate->eptgen != eptgen) {
if (flush_by_asid())
@@ -1869,10 +1872,10 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
}
if (alloc_asid) {
- if (++asid[thiscpu].num >= nasid) {
- asid[thiscpu].num = 1;
- if (++asid[thiscpu].gen == 0)
- asid[thiscpu].gen = 1;
+ if (++asid[cpu].num >= nasid) {
+ asid[cpu].num = 1;
+ if (++asid[cpu].gen == 0)
+ asid[cpu].gen = 1;
/*
* If this cpu does not support "flush-by-asid"
* then flush the entire TLB on a generation
@@ -1882,8 +1885,8 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
if (!flush_by_asid())
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
}
- vcpustate->asid.gen = asid[thiscpu].gen;
- vcpustate->asid.num = asid[thiscpu].num;
+ vcpustate->asid.gen = asid[cpu].gen;
+ vcpustate->asid.num = asid[cpu].num;
ctrl->asid = vcpustate->asid.num;
svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
@@ -1902,6 +1905,13 @@ check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}
+static void
+svm_pmap_deactivate(pmap_t pmap)
+{
+ smr_exit(pmap->pm_eptsmr);
+ CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
+}
+
static __inline void
disable_gintr(void)
{
@@ -2083,14 +2093,11 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
svm_inj_interrupts(svm_sc, vcpu, vlapic);
- /* Activate the nested pmap on 'curcpu' */
- CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);
-
/*
* Check the pmap generation and the ASID generation to
* ensure that the vcpu does not use stale TLB mappings.
*/
- check_asid(svm_sc, vcpu, pmap, curcpu);
+ svm_pmap_activate(svm_sc, vcpu, pmap);
ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
vcpustate->dirty = 0;
@@ -2102,7 +2109,7 @@ svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
svm_launch(vmcb_pa, gctx, get_pcpu());
svm_dr_leave_guest(gctx);
- CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
+ svm_pmap_deactivate(pmap);
/*
* The host GDTR and IDTR is saved by VMRUN and restored
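The key ordering on the read side, distilled (a hypothetical sketch, not
verbatim kernel code; cached_eptgen[] and flush_guest_tlb() are
stand-ins for the per-CPU ASID/generation machinery above): smr_enter()
must precede the pm_eptgen load, so a writer that advances the write
sequence and then polls readers cannot miss a vCPU that is about to
enter the guest with a stale generation.

	CPU_SET_ATOMIC(curcpu, &pmap->pm_active);
	smr_enter(pmap->pm_eptsmr);			/* enter read section first */
	eptgen = atomic_load_long(&pmap->pm_eptgen);	/* then sample the generation */
	if (eptgen != cached_eptgen[curcpu])
		flush_guest_tlb();			/* stand-in for ASID bump / TLB flush */
	/* ... VMRUN: guest executes inside the SMR read section ... */
	smr_exit(pmap->pm_eptsmr);
	CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);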
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 7d2a5f0f27e8..94f83a098a4c 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
+#include <sys/smr.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -1273,7 +1274,7 @@ vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
* Note also that this will invalidate mappings tagged with 'vpid'
* for "all" EP4TAs.
*/
- if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
+ if (atomic_load_long(&pmap->pm_eptgen) == vmx->eptgen[curcpu]) {
invvpid_desc._res1 = 0;
invvpid_desc._res2 = 0;
invvpid_desc.vpid = vmxstate->vpid;
@@ -2948,6 +2949,7 @@ vmx_pmap_activate(struct vmx *vmx, pmap_t pmap)
cpu = curcpu;
CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+ smr_enter(pmap->pm_eptsmr);
eptgen = atomic_load_long(&pmap->pm_eptgen);
if (eptgen != vmx->eptgen[cpu]) {
vmx->eptgen[cpu] = eptgen;
@@ -2959,6 +2961,7 @@ vmx_pmap_activate(struct vmx *vmx, pmap_t pmap)
static __inline void
vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
{
+ smr_exit(pmap->pm_eptsmr);
CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
}
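On the VMX side the pair is expected to bracket guest entry in the run
loop, so any vCPU observed inside an SMR section by
pmap_invalidate_ept() is known to re-check pm_eptgen before its next VM
entry.  A hedged sketch of the call pattern (vmx_enter_guest() is the
name assumed here for the VM-entry path; elided arguments are left as
"..."):

	vmx_pmap_activate(vmx, pmap);	/* set pm_active, smr_enter, eptgen check */
	vmx_enter_guest(...);		/* VM entry; guest runs inside the SMR section */
	vmx_pmap_deactivate(vmx, pmap);	/* smr_exit, then clear pm_active */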