author    John Baldwin <jhb@FreeBSD.org>  2022-11-18 17:59:21 +0000
committer John Baldwin <jhb@FreeBSD.org>  2023-01-26 21:44:52 +0000
commit    6fc2d2dbe2354f99c3973f1d855479d9dd65232e (patch)
tree      ecf0be376c864ae88d809a83e50ae87314a3c84b
parent    bf5683caedae298a17dfed91aeddb8249772c136 (diff)
vmm: Refactor storage of CPU-dependent per-vCPU data.
Rather than storing static arrays of per-vCPU data in the CPU-specific
per-VM structure, adopt a more dynamic model similar to that used to
manage CPU-specific per-VM data.  That is, add new vmmops methods to
init and cleanup a single vCPU.  The init method returns a pointer that
is stored in 'struct vcpu' as a cookie pointer.  This cookie pointer is
now passed to other vmmops callbacks in place of the integer index.
The index is now only used in KTR traces and when calling back into the
CPU-independent layer.

Reviewed by:	corvink, markj
Differential Revision:	https://reviews.freebsd.org/D37151

(cherry picked from commit 1aa5150479bf35c90c6770e6ea90e8462cfb6bf9)
-rw-r--r--  sys/amd64/include/vmm.h        |  24
-rw-r--r--  sys/amd64/vmm/amd/svm.c        | 606
-rw-r--r--  sys/amd64/vmm/amd/svm.h        |   4
-rw-r--r--  sys/amd64/vmm/amd/svm_msr.c    |  21
-rw-r--r--  sys/amd64/vmm/amd/svm_msr.h    |  15
-rw-r--r--  sys/amd64/vmm/amd/svm_softc.h  |  34
-rw-r--r--  sys/amd64/vmm/amd/vmcb.c       |  80
-rw-r--r--  sys/amd64/vmm/amd/vmcb.h       |  23
-rw-r--r--  sys/amd64/vmm/intel/vmx.c      | 809
-rw-r--r--  sys/amd64/vmm/intel/vmx.h      |  12
-rw-r--r--  sys/amd64/vmm/intel/vmx_msr.c  |  74
-rw-r--r--  sys/amd64/vmm/intel/vmx_msr.h  |  16
-rw-r--r--  sys/amd64/vmm/vmm.c            |  65
13 files changed, 926 insertions, 857 deletions
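
For orientation before the per-file diffs: the sketch below illustrates how the
CPU-independent layer can drive the new vcpu_init/vcpu_cleanup hooks and carry
the returned cookie in 'struct vcpu'.  It is a minimal sketch, not the actual
sys/amd64/vmm/vmm.c change from this commit; the simplified structure layouts
and the vmmops_vcpu_init()/vmmops_vcpu_cleanup() wrapper names are assumptions
for illustration.

/*
 * Minimal sketch of the new per-vCPU init/cleanup flow (assumed,
 * simplified structures; not the literal vmm.c code in this commit).
 */
struct vcpu {
	void	*cookie;		/* CPU-specific per-vCPU data */
	/* ... other CPU-independent per-vCPU state ... */
};

struct vm {
	void		*cookie;	/* CPU-specific per-VM data */
	struct vcpu	vcpu[VM_MAXCPU];
	/* ... */
};

static void
vcpu_init_sketch(struct vm *vm, int vcpu_id)
{
	struct vcpu *vcpu = &vm->vcpu[vcpu_id];

	/* The backend allocates its per-vCPU state and returns a cookie. */
	vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);
}

static void
vcpu_cleanup_sketch(struct vm *vm, int vcpu_id)
{
	struct vcpu *vcpu = &vm->vcpu[vcpu_id];

	/* The cookie, not the integer index, is passed back for teardown. */
	vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
	vcpu->cookie = NULL;
}

The SVM backend's implementations of these hooks, svm_vcpu_init() and
svm_vcpu_cleanup(), appear in the sys/amd64/vmm/amd/svm.c hunks below.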
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
index 62456fe9d12d..9f76eda9d8e8 100644
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -167,27 +167,29 @@ typedef int (*vmm_init_func_t)(int ipinum);
typedef int (*vmm_cleanup_func_t)(void);
typedef void (*vmm_resume_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
+typedef int (*vmi_run_func_t)(void *vmi, void *vcpui, register_t rip,
struct pmap *pmap, struct vm_eventinfo *info);
typedef void (*vmi_cleanup_func_t)(void *vmi);
-typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
+typedef void * (*vmi_vcpu_init_func_t)(void *vmi, int vcpu_id);
+typedef void (*vmi_vcpu_cleanup_func_t)(void *vmi, void *vcpui);
+typedef int (*vmi_get_register_t)(void *vmi, void *vcpui, int num,
uint64_t *retval);
-typedef int (*vmi_set_register_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_set_register_t)(void *vmi, void *vcpui, int num,
uint64_t val);
-typedef int (*vmi_get_desc_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_get_desc_t)(void *vmi, void *vcpui, int num,
struct seg_desc *desc);
-typedef int (*vmi_set_desc_t)(void *vmi, int vcpu, int num,
+typedef int (*vmi_set_desc_t)(void *vmi, void *vcpui, int num,
struct seg_desc *desc);
-typedef int (*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
-typedef int (*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
+typedef int (*vmi_get_cap_t)(void *vmi, void *vcpui, int num, int *retval);
+typedef int (*vmi_set_cap_t)(void *vmi, void *vcpui, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void (*vmi_vmspace_free)(struct vmspace *vmspace);
-typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
+typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, void *vcpui);
typedef void (*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
typedef int (*vmi_snapshot_t)(void *vmi, struct vm_snapshot_meta *meta);
typedef int (*vmi_snapshot_vcpu_t)(void *vmi, struct vm_snapshot_meta *meta,
- int vcpu);
-typedef int (*vmi_restore_tsc_t)(void *vmi, int vcpuid, uint64_t now);
+ void *vcpui);
+typedef int (*vmi_restore_tsc_t)(void *vmi, void *vcpui, uint64_t now);
struct vmm_ops {
vmm_init_func_t modinit; /* module wide initialization */
@@ -197,6 +199,8 @@ struct vmm_ops {
vmi_init_func_t init; /* vm-specific initialization */
vmi_run_func_t run;
vmi_cleanup_func_t cleanup;
+ vmi_vcpu_init_func_t vcpu_init;
+ vmi_vcpu_cleanup_func_t vcpu_cleanup;
vmi_get_register_t getreg;
vmi_set_register_t setreg;
vmi_get_desc_t getdesc;
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
index fca3722ed7f4..dee88f11dce2 100644
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -132,8 +132,8 @@ static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
-static int svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
-static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
+static int svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
+static int svm_setreg(void *arg, void *vcpui, int ident, uint64_t val);
static __inline int
flush_by_asid(void)
@@ -283,18 +283,18 @@ svm_modresume(void)
#ifdef BHYVE_SNAPSHOT
int
-svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset)
+svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t offset)
{
int error;
struct vmcb_ctrl *ctrl;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
ctrl->tsc_offset = offset;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
- VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset);
+ svm_set_dirty(vcpu, VMCB_CACHE_I);
+ VCPU_CTR1(sc->vm, vcpu->vcpuid, "tsc offset changed to %#lx", offset);
- error = vm_set_tsc_offset(sc->vm, vcpu, offset);
+ error = vm_set_tsc_offset(sc->vm, vcpu->vcpuid, offset);
return (error);
}
@@ -382,26 +382,27 @@ svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
}
static __inline int
-svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
+svm_get_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
+ uint32_t bitmask)
{
struct vmcb_ctrl *ctrl;
KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}
static __inline void
-svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
- int enabled)
+svm_set_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int idx,
+ uint32_t bitmask, int enabled)
{
struct vmcb_ctrl *ctrl;
uint32_t oldval;
KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
oldval = ctrl->intercept[idx];
if (enabled)
@@ -410,28 +411,30 @@ svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
ctrl->intercept[idx] &= ~bitmask;
if (ctrl->intercept[idx] != oldval) {
- svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
- VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
+ svm_set_dirty(vcpu, VMCB_CACHE_I);
+ VCPU_CTR3(sc->vm, vcpu->vcpuid, "intercept[%d] modified "
"from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
}
}
static __inline void
-svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+svm_disable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
+ uint32_t bitmask)
{
svm_set_intercept(sc, vcpu, off, bitmask, 0);
}
static __inline void
-svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
+svm_enable_intercept(struct svm_softc *sc, struct svm_vcpu *vcpu, int off,
+ uint32_t bitmask)
{
svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
static void
-vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
+vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
uint64_t msrpm_base_pa, uint64_t np_pml4)
{
struct vmcb_ctrl *ctrl;
@@ -439,8 +442,8 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
uint32_t mask;
int n;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
- state = svm_get_vmcb_state(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
+ state = svm_get_vmcb_state(vcpu);
ctrl->iopm_base_pa = iopm_base_pa;
ctrl->msrpm_base_pa = msrpm_base_pa;
@@ -465,7 +468,7 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
* Intercept everything when tracing guest exceptions otherwise
* just intercept machine check exception.
*/
- if (vcpu_trace_exceptions(sc->vm, vcpu)) {
+ if (vcpu_trace_exceptions(sc->vm, vcpu->vcpuid)) {
for (n = 0; n < 32; n++) {
/*
* Skip unimplemented vectors in the exception bitmap.
@@ -506,7 +509,7 @@ vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
- if (vcpu_trap_wbinvd(sc->vm, vcpu)) {
+ if (vcpu_trap_wbinvd(sc->vm, vcpu->vcpuid)) {
svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT,
VMCB_INTCPT_WBINVD);
}
@@ -559,10 +562,6 @@ static void *
svm_init(struct vm *vm, pmap_t pmap)
{
struct svm_softc *svm_sc;
- struct svm_vcpu *vcpu;
- vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
- int i;
- uint16_t maxcpus;
svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
@@ -576,7 +575,7 @@ svm_init(struct vm *vm, pmap_t pmap)
panic("contigmalloc of SVM IO bitmap failed");
svm_sc->vm = vm;
- svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pmltop);
+ svm_sc->nptp = vtophys(pmap->pm_pmltop);
/*
* Intercept read and write accesses to all MSRs.
@@ -611,23 +610,28 @@ svm_init(struct vm *vm, pmap_t pmap)
/* Intercept access to all I/O ports. */
memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
- iopm_pa = vtophys(svm_sc->iopm_bitmap);
- msrpm_pa = vtophys(svm_sc->msr_bitmap);
- pml4_pa = svm_sc->nptp;
- maxcpus = vm_get_maxcpus(svm_sc->vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = svm_get_vcpu(svm_sc, i);
- vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE,
- M_SVM, M_WAITOK | M_ZERO);
- vcpu->nextrip = ~0;
- vcpu->lastcpu = NOCPU;
- vcpu->vmcb_pa = vtophys(vcpu->vmcb);
- vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
- svm_msr_guest_init(svm_sc, i);
- }
return (svm_sc);
}
+static void *
+svm_vcpu_init(void *arg, int vcpuid)
+{
+ struct svm_softc *sc = arg;
+ struct svm_vcpu *vcpu;
+
+ vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
+ vcpu->vcpuid = vcpuid;
+ vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
+ M_WAITOK | M_ZERO);
+ vcpu->nextrip = ~0;
+ vcpu->lastcpu = NOCPU;
+ vcpu->vmcb_pa = vtophys(vcpu->vmcb);
+ vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
+ sc->nptp);
+ svm_msr_guest_init(sc, vcpu);
+ return (vcpu);
+}
+
/*
* Collateral for a generic SVM VM-exit.
*/
@@ -720,8 +724,8 @@ svm_inout_str_count(struct svm_regctx *regs, int rep)
}
static void
-svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
- int in, struct vm_inout_str *vis)
+svm_inout_str_seginfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
+ int64_t info1, int in, struct vm_inout_str *vis)
{
int error __diagused, s;
@@ -774,7 +778,8 @@ svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
* Handle guest I/O intercept.
*/
static int
-svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
+svm_handle_io(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
+ struct vm_exit *vmexit)
{
struct vmcb_ctrl *ctrl;
struct vmcb_state *state;
@@ -783,9 +788,9 @@ svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
uint64_t info1;
int inout_string;
- state = svm_get_vmcb_state(svm_sc, vcpu);
- ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
- regs = svm_get_guest_regctx(svm_sc, vcpu);
+ state = svm_get_vmcb_state(vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
+ regs = svm_get_guest_regctx(vcpu);
info1 = ctrl->exitinfo1;
inout_string = info1 & BIT(2) ? 1 : 0;
@@ -811,7 +816,7 @@ svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
if (inout_string) {
vmexit->exitcode = VM_EXITCODE_INOUT_STR;
vis = &vmexit->u.inout_str;
- svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
+ svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
vis->rflags = state->rflags;
vis->cr0 = state->cr0;
vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
@@ -932,12 +937,12 @@ intrtype_to_str(int intr_type)
* Inject an event to vcpu as described in section 15.20, "Event injection".
*/
static void
-svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
- uint32_t error, bool ec_valid)
+svm_eventinject(struct svm_softc *sc, struct svm_vcpu *vcpu, int intr_type,
+ int vector, uint32_t error, bool ec_valid)
{
struct vmcb_ctrl *ctrl;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
("%s: event already pending %#lx", __func__, ctrl->eventinj));
@@ -962,24 +967,25 @@ svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
if (ec_valid) {
ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
ctrl->eventinj |= (uint64_t)error << 32;
- VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
+ VCPU_CTR3(sc->vm, vcpu->vcpuid,
+ "Injecting %s at vector %d errcode %#x",
intrtype_to_str(intr_type), vector, error);
} else {
- VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
+ VCPU_CTR2(sc->vm, vcpu->vcpuid, "Injecting %s at vector %d",
intrtype_to_str(intr_type), vector);
}
}
static void
-svm_update_virqinfo(struct svm_softc *sc, int vcpu)
+svm_update_virqinfo(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
struct vm *vm;
struct vlapic *vlapic;
struct vmcb_ctrl *ctrl;
vm = sc->vm;
- vlapic = vm_lapic(vm, vcpu);
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ vlapic = vm_lapic(vm, vcpu->vcpuid);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
/* Update %cr8 in the emulated vlapic */
vlapic_set_cr8(vlapic, ctrl->v_tpr);
@@ -990,12 +996,14 @@ svm_update_virqinfo(struct svm_softc *sc, int vcpu)
}
static void
-svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
+svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
{
struct vmcb_ctrl *ctrl;
uint64_t intinfo;
+ int vcpuid;
- ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
+ vcpuid = vcpu->vcpuid;
+ ctrl = svm_get_vmcb_ctrl(vcpu);
intinfo = ctrl->exitintinfo;
if (!VMCB_EXITINTINFO_VALID(intinfo))
return;
@@ -1006,15 +1014,15 @@ svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
* If a #VMEXIT happened during event delivery then record the event
* that was being delivered.
*/
- VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
+ VCPU_CTR2(svm_sc->vm, vcpuid, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
- vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
- vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_EXITINTINFO, 1);
+ vm_exit_intinfo(svm_sc->vm, vcpuid, intinfo);
}
#ifdef INVARIANTS
static __inline int
-vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
+vintr_intercept_enabled(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
@@ -1023,11 +1031,11 @@ vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
#endif
static __inline void
-enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
+enable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
struct vmcb_ctrl *ctrl;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
@@ -1036,20 +1044,20 @@ enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
return;
}
- VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
+ VCPU_CTR0(sc->vm, vcpu->vcpuid, "Enable intr window exiting");
ctrl->v_irq = 1;
ctrl->v_ign_tpr = 1;
ctrl->v_intr_vector = 0;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ svm_set_dirty(vcpu, VMCB_CACHE_TPR);
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static __inline void
-disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
+disable_intr_window_exiting(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
struct vmcb_ctrl *ctrl;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
KASSERT(!vintr_intercept_enabled(sc, vcpu),
@@ -1057,35 +1065,36 @@ disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
return;
}
- VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
+ VCPU_CTR0(sc->vm, vcpu->vcpuid, "Disable intr window exiting");
ctrl->v_irq = 0;
ctrl->v_intr_vector = 0;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ svm_set_dirty(vcpu, VMCB_CACHE_TPR);
svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static int
-svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
+svm_modify_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu,
+ uint64_t val)
{
struct vmcb_ctrl *ctrl;
int oldval, newval;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
oldval = ctrl->intr_shadow;
newval = val ? 1 : 0;
if (newval != oldval) {
ctrl->intr_shadow = newval;
- VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
+ VCPU_CTR1(sc->vm, vcpu->vcpuid, "Setting intr_shadow to %d", newval);
}
return (0);
}
static int
-svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
+svm_get_intr_shadow(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t *val)
{
struct vmcb_ctrl *ctrl;
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
*val = ctrl->intr_shadow;
return (0);
}
@@ -1096,7 +1105,7 @@ svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
* to track when the vcpu is done handling the NMI.
*/
static int
-nmi_blocked(struct svm_softc *sc, int vcpu)
+nmi_blocked(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
int blocked;
@@ -1106,21 +1115,21 @@ nmi_blocked(struct svm_softc *sc, int vcpu)
}
static void
-enable_nmi_blocking(struct svm_softc *sc, int vcpu)
+enable_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
- VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
+ VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking enabled");
svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}
static void
-clear_nmi_blocking(struct svm_softc *sc, int vcpu)
+clear_nmi_blocking(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
int error __diagused;
KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
- VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
+ VCPU_CTR0(sc->vm, vcpu->vcpuid, "vNMI blocking cleared");
/*
* When the IRET intercept is cleared the vcpu will attempt to execute
* the "iret" when it runs next. However, it is possible to inject
@@ -1145,17 +1154,19 @@ clear_nmi_blocking(struct svm_softc *sc, int vcpu)
#define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL
static int
-svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
+svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
+ bool *retu)
{
struct vm_exit *vme;
struct vmcb_state *state;
uint64_t changed, lma, oldval;
- int error __diagused;
+ int error __diagused, vcpuid;
- state = svm_get_vmcb_state(sc, vcpu);
+ state = svm_get_vmcb_state(vcpu);
+ vcpuid = vcpu->vcpuid;
oldval = state->efer;
- VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
+ VCPU_CTR2(sc->vm, vcpuid, "wrmsr(efer) %#lx/%#lx", oldval, newval);
newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
changed = oldval ^ newval;
@@ -1179,7 +1190,7 @@ svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
goto gpf;
if (newval & EFER_NXE) {
- if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
+ if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_NO_EXECUTE))
goto gpf;
}
@@ -1188,19 +1199,19 @@ svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
* this is fixed flag guest attempt to set EFER_LMSLE as an error.
*/
if (newval & EFER_LMSLE) {
- vme = vm_exitinfo(sc->vm, vcpu);
+ vme = vm_exitinfo(sc->vm, vcpuid);
vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
*retu = true;
return (0);
}
if (newval & EFER_FFXSR) {
- if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
+ if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_FFXSR))
goto gpf;
}
if (newval & EFER_TCE) {
- if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
+ if (!vm_cpuid_capability(sc->vm, vcpuid, VCC_TCE))
goto gpf;
}
@@ -1208,18 +1219,18 @@ svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
return (0);
gpf:
- vm_inject_gp(sc->vm, vcpu);
+ vm_inject_gp(sc->vm, vcpuid);
return (0);
}
static int
-emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
- bool *retu)
+emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+ uint64_t val, bool *retu)
{
int error;
if (lapic_msr(num))
- error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
+ error = lapic_wrmsr(sc->vm, vcpu->vcpuid, num, val, retu);
else if (num == MSR_EFER)
error = svm_write_efer(sc, vcpu, val, retu);
else
@@ -1229,7 +1240,8 @@ emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
}
static int
-emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
+emulate_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+ bool *retu)
{
struct vmcb_state *state;
struct svm_regctx *ctx;
@@ -1237,13 +1249,13 @@ emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
int error;
if (lapic_msr(num))
- error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
+ error = lapic_rdmsr(sc->vm, vcpu->vcpuid, num, &result, retu);
else
error = svm_rdmsr(sc, vcpu, num, &result, retu);
if (error == 0) {
- state = svm_get_vmcb_state(sc, vcpu);
- ctx = svm_get_guest_regctx(sc, vcpu);
+ state = svm_get_vmcb_state(vcpu);
+ ctx = svm_get_guest_regctx(vcpu);
state->rax = result & 0xffffffff;
ctx->sctx_rdx = result >> 32;
}
@@ -1324,7 +1336,8 @@ nrip_valid(uint64_t exitcode)
}
static int
-svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
+svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
+ struct vm_exit *vmexit)
{
struct vmcb *vmcb;
struct vmcb_state *state;
@@ -1333,12 +1346,14 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
uint64_t code, info1, info2, val;
uint32_t eax, ecx, edx;
int error __diagused, errcode_valid, handled, idtvec, reflect;
+ int vcpuid;
bool retu;
- ctx = svm_get_guest_regctx(svm_sc, vcpu);
- vmcb = svm_get_vmcb(svm_sc, vcpu);
+ ctx = svm_get_guest_regctx(vcpu);
+ vmcb = svm_get_vmcb(vcpu);
state = &vmcb->state;
ctrl = &vmcb->ctrl;
+ vcpuid = vcpu->vcpuid;
handled = 0;
code = ctrl->exitcode;
@@ -1349,7 +1364,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
vmexit->rip = state->rip;
vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_COUNT, 1);
/*
* #VMEXIT(INVALID) needs to be handled early because the VMCB is
@@ -1381,18 +1396,18 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
handled = 1;
break;
case VMCB_EXIT_VINTR: /* interrupt window exiting */
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_VINTR, 1);
handled = 1;
break;
case VMCB_EXIT_INTR: /* external interrupt */
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXTINT, 1);
handled = 1;
break;
case VMCB_EXIT_NMI: /* external NMI */
handled = 1;
break;
case 0x40 ... 0x5F:
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXCEPTION, 1);
reflect = 1;
idtvec = code - 0x40;
switch (idtvec) {
@@ -1402,7 +1417,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
* reflect the machine check back into the guest.
*/
reflect = 0;
- VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
+ VCPU_CTR0(svm_sc->vm, vcpuid, "Vectoring to MCE handler");
__asm __volatile("int $18");
break;
case IDT_PF:
@@ -1436,7 +1451,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
* event injection is identical to what it was when
* the exception originally happened.
*/
- VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
+ VCPU_CTR2(svm_sc->vm, vcpuid, "Reset inst_length from %d "
"to zero before injecting exception %d",
vmexit->inst_length, idtvec);
vmexit->inst_length = 0;
@@ -1452,9 +1467,9 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
if (reflect) {
/* Reflect the exception back into the guest */
- VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
+ VCPU_CTR2(svm_sc->vm, vcpuid, "Reflecting exception "
"%d/%#x into the guest", idtvec, (int)info1);
- error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
+ error = vm_inject_exception(svm_sc->vm, vcpuid, idtvec,
errcode_valid, info1, 0);
KASSERT(error == 0, ("%s: vm_inject_exception error %d",
__func__, error));
@@ -1468,9 +1483,9 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
retu = false;
if (info1) {
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_WRMSR, 1);
val = (uint64_t)edx << 32 | eax;
- VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
+ VCPU_CTR2(svm_sc->vm, vcpuid, "wrmsr %#x val %#lx",
ecx, val);
if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
vmexit->exitcode = VM_EXITCODE_WRMSR;
@@ -1483,8 +1498,8 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
("emulate_wrmsr retu with bogus exitcode"));
}
} else {
- VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
+ VCPU_CTR1(svm_sc->vm, vcpuid, "rdmsr %#x", ecx);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_RDMSR, 1);
if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
vmexit->exitcode = VM_EXITCODE_RDMSR;
vmexit->u.msr.code = ecx;
@@ -1498,40 +1513,40 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
break;
case VMCB_EXIT_IO:
handled = svm_handle_io(svm_sc, vcpu, vmexit);
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INOUT, 1);
break;
case VMCB_EXIT_CPUID:
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
- handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_CPUID, 1);
+ handled = x86_emulate_cpuid(svm_sc->vm, vcpuid, &state->rax,
&ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
break;
case VMCB_EXIT_HLT:
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_HLT, 1);
vmexit->exitcode = VM_EXITCODE_HLT;
vmexit->u.hlt.rflags = state->rflags;
break;
case VMCB_EXIT_PAUSE:
vmexit->exitcode = VM_EXITCODE_PAUSE;
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_PAUSE, 1);
break;
case VMCB_EXIT_NPF:
/* EXITINFO2 contains the faulting guest physical address */
if (info1 & VMCB_NPF_INFO1_RSV) {
- VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
+ VCPU_CTR2(svm_sc->vm, vcpuid, "nested page fault with "
"reserved bits set: info1(%#lx) info2(%#lx)",
info1, info2);
- } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
+ } else if (vm_mem_allocated(svm_sc->vm, vcpuid, info2)) {
vmexit->exitcode = VM_EXITCODE_PAGING;
vmexit->u.paging.gpa = info2;
vmexit->u.paging.fault_type = npf_fault_type(info1);
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
- VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
+ VCPU_CTR3(svm_sc->vm, vcpuid, "nested page fault "
"on gpa %#lx/%#lx at rip %#lx",
info2, info1, state->rip);
} else if (svm_npf_emul_fault(info1)) {
svm_handle_inst_emul(vmcb, info2, vmexit);
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
- VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INST_EMUL, 1);
+ VCPU_CTR3(svm_sc->vm, vcpuid, "inst_emul fault "
"for gpa %#lx/%#lx at rip %#lx",
info2, info1, state->rip);
}
@@ -1552,7 +1567,7 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
case VMCB_EXIT_SKINIT:
case VMCB_EXIT_ICEBP:
case VMCB_EXIT_INVLPGA:
- vm_inject_ud(svm_sc->vm, vcpu);
+ vm_inject_ud(svm_sc->vm, vcpuid);
handled = 1;
break;
case VMCB_EXIT_INVD:
@@ -1561,11 +1576,11 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
handled = 1;
break;
default:
- vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_UNKNOWN, 1);
break;
}
- VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
+ VCPU_CTR4(svm_sc->vm, vcpuid, "%s %s vmexit at %#lx/%d",
handled ? "handled" : "unhandled", exit_reason_to_str(code),
vmexit->rip, vmexit->inst_length);
@@ -1591,11 +1606,12 @@ svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
}
static void
-svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
+svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
{
uint64_t intinfo;
+ int vcpuid = vcpu->vcpuid;
- if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
+ if (!vm_entry_intinfo(svm_sc->vm, vcpuid, &intinfo))
return;
KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
@@ -1605,34 +1621,34 @@ svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
VMCB_EXITINTINFO_VECTOR(intinfo),
VMCB_EXITINTINFO_EC(intinfo),
VMCB_EXITINTINFO_EC_VALID(intinfo));
- vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
- VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
+ vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_INTINFO_INJECTED, 1);
+ VCPU_CTR1(svm_sc->vm, vcpuid, "Injected entry intinfo: %#lx", intinfo);
}
/*
* Inject event to virtual cpu.
*/
static void
-svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
+svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
+ struct vlapic *vlapic)
{
struct vmcb_ctrl *ctrl;
struct vmcb_state *state;
- struct svm_vcpu *vcpustate;
uint8_t v_tpr;
int vector, need_intr_window;
int extint_pending;
+ int vcpuid = vcpu->vcpuid;
- state = svm_get_vmcb_state(sc, vcpu);
- ctrl = svm_get_vmcb_ctrl(sc, vcpu);
- vcpustate = svm_get_vcpu(sc, vcpu);
+ state = svm_get_vmcb_state(vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
need_intr_window = 0;
- if (vcpustate->nextrip != state->rip) {
+ if (vcpu->nextrip != state->rip) {
ctrl->intr_shadow = 0;
- VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
+ VCPU_CTR2(sc->vm, vcpuid, "Guest interrupt blocking "
"cleared due to rip change: %#lx/%#lx",
- vcpustate->nextrip, state->rip);
+ vcpu->nextrip, state->rip);
}
/*
@@ -1647,19 +1663,19 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
svm_inj_intinfo(sc, vcpu);
/* NMI event has priority over interrupts. */
- if (vm_nmi_pending(sc->vm, vcpu)) {
+ if (vm_nmi_pending(sc->vm, vcpuid)) {
if (nmi_blocked(sc, vcpu)) {
/*
* Can't inject another NMI if the guest has not
* yet executed an "iret" after the last NMI.
*/
- VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
+ VCPU_CTR0(sc->vm, vcpuid, "Cannot inject NMI due "
"to NMI-blocking");
} else if (ctrl->intr_shadow) {
/*
* Can't inject an NMI if the vcpu is in an intr_shadow.
*/
- VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
+ VCPU_CTR0(sc->vm, vcpuid, "Cannot inject NMI due to "
"interrupt shadow");
need_intr_window = 1;
goto done;
@@ -1668,7 +1684,7 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
* If there is already an exception/interrupt pending
* then defer the NMI until after that.
*/
- VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
+ VCPU_CTR1(sc->vm, vcpuid, "Cannot inject NMI due to "
"eventinj %#lx", ctrl->eventinj);
/*
@@ -1683,7 +1699,7 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
*/
ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */
} else {
- vm_nmi_clear(sc->vm, vcpu);
+ vm_nmi_clear(sc->vm, vcpuid);
/* Inject NMI, vector number is not used */
svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
@@ -1692,11 +1708,11 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
/* virtual NMI blocking is now in effect */
enable_nmi_blocking(sc, vcpu);
- VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
+ VCPU_CTR0(sc->vm, vcpuid, "Injecting vNMI");
}
}
- extint_pending = vm_extint_pending(sc->vm, vcpu);
+ extint_pending = vm_extint_pending(sc->vm, vcpuid);
if (!extint_pending) {
if (!vlapic_pending_intr(vlapic, &vector))
goto done;
@@ -1714,21 +1730,21 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
* then we cannot inject the pending interrupt.
*/
if ((state->rflags & PSL_I) == 0) {
- VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
+ VCPU_CTR2(sc->vm, vcpuid, "Cannot inject vector %d due to "
"rflags %#lx", vector, state->rflags);
need_intr_window = 1;
goto done;
}
if (ctrl->intr_shadow) {
- VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
+ VCPU_CTR1(sc->vm, vcpuid, "Cannot inject vector %d due to "
"interrupt shadow", vector);
need_intr_window = 1;
goto done;
}
if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
- VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
+ VCPU_CTR2(sc->vm, vcpuid, "Cannot inject vector %d due to "
"eventinj %#lx", vector, ctrl->eventinj);
need_intr_window = 1;
goto done;
@@ -1739,7 +1755,7 @@ svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
if (!extint_pending) {
vlapic_intr_accepted(vlapic, vector);
} else {
- vm_extint_clear(sc->vm, vcpu);
+ vm_extint_clear(sc->vm, vcpuid);
vatpic_intr_accepted(sc->vm, vector);
}
@@ -1765,10 +1781,10 @@ done:
v_tpr = vlapic_get_cr8(vlapic);
KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
if (ctrl->v_tpr != v_tpr) {
- VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
+ VCPU_CTR2(sc->vm, vcpuid, "VMCB V_TPR changed from %#x to %#x",
ctrl->v_tpr, v_tpr);
ctrl->v_tpr = v_tpr;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
+ svm_set_dirty(vcpu, VMCB_CACHE_TPR);
}
if (need_intr_window) {
@@ -1810,9 +1826,8 @@ restore_host_tss(void)
}
static void
-svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
+svm_pmap_activate(struct svm_softc *sc, struct svm_vcpu *vcpu, pmap_t pmap)
{
- struct svm_vcpu *vcpustate;
struct vmcb_ctrl *ctrl;
long eptgen;
int cpu;
@@ -1822,8 +1837,7 @@ svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
CPU_SET_ATOMIC(cpu, &pmap->pm_active);
smr_enter(pmap->pm_eptsmr);
- vcpustate = svm_get_vcpu(sc, vcpuid);
- ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
/*
* The TLB entries associated with the vcpu's ASID are not valid
@@ -1864,9 +1878,9 @@ svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
eptgen = atomic_load_long(&pmap->pm_eptgen);
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
- if (vcpustate->asid.gen != asid[cpu].gen) {
+ if (vcpu->asid.gen != asid[cpu].gen) {
alloc_asid = true; /* (c) and (d) */
- } else if (vcpustate->eptgen != eptgen) {
+ } else if (vcpu->eptgen != eptgen) {
if (flush_by_asid())
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */
else
@@ -1894,11 +1908,11 @@ svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
if (!flush_by_asid())
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
}
- vcpustate->asid.gen = asid[cpu].gen;
- vcpustate->asid.num = asid[cpu].num;
+ vcpu->asid.gen = asid[cpu].gen;
+ vcpu->asid.num = asid[cpu].num;
- ctrl->asid = vcpustate->asid.num;
- svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
+ ctrl->asid = vcpu->asid.num;
+ svm_set_dirty(vcpu, VMCB_CACHE_ASID);
/*
* If this cpu supports "flush-by-asid" then the TLB
* was not flushed after the generation bump. The TLB
@@ -1907,11 +1921,11 @@ svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
if (flush_by_asid())
ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
}
- vcpustate->eptgen = eptgen;
+ vcpu->eptgen = eptgen;
KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
- KASSERT(ctrl->asid == vcpustate->asid.num,
- ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
+ KASSERT(ctrl->asid == vcpu->asid.num,
+ ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num));
}
static void
@@ -1993,47 +2007,48 @@ svm_dr_leave_guest(struct svm_regctx *gctx)
* Start vcpu with specified RIP.
*/
static int
-svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
+svm_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
struct vm_eventinfo *evinfo)
{
struct svm_regctx *gctx;
struct svm_softc *svm_sc;
- struct svm_vcpu *vcpustate;
+ struct svm_vcpu *vcpu;
struct vmcb_state *state;
struct vmcb_ctrl *ctrl;
struct vm_exit *vmexit;
struct vlapic *vlapic;
struct vm *vm;
uint64_t vmcb_pa;
- int handled;
+ int handled, vcpuid;
uint16_t ldt_sel;
svm_sc = arg;
vm = svm_sc->vm;
- vcpustate = svm_get_vcpu(svm_sc, vcpu);
- state = svm_get_vmcb_state(svm_sc, vcpu);
- ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
- vmexit = vm_exitinfo(vm, vcpu);
- vlapic = vm_lapic(vm, vcpu);
+ vcpu = vcpui;
+ vcpuid = vcpu->vcpuid;
+ state = svm_get_vmcb_state(vcpu);
+ ctrl = svm_get_vmcb_ctrl(vcpu);
+ vmexit = vm_exitinfo(vm, vcpuid);
+ vlapic = vm_lapic(vm, vcpuid);
- gctx = svm_get_guest_regctx(svm_sc, vcpu);
- vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
+ gctx = svm_get_guest_regctx(vcpu);
+ vmcb_pa = vcpu->vmcb_pa;
- if (vcpustate->lastcpu != curcpu) {
+ if (vcpu->lastcpu != curcpu) {
/*
* Force new ASID allocation by invalidating the generation.
*/
- vcpustate->asid.gen = 0;
+ vcpu->asid.gen = 0;
/*
* Invalidate the VMCB state cache by marking all fields dirty.
*/
- svm_set_dirty(svm_sc, vcpu, 0xffffffff);
+ svm_set_dirty(vcpu, 0xffffffff);
/*
* XXX
- * Setting 'vcpustate->lastcpu' here is bit premature because
+ * Setting 'vcpu->lastcpu' here is bit premature because
* we may return from this function without actually executing
* the VMRUN instruction. This could happen if a rendezvous
* or an AST is pending on the first time through the loop.
@@ -2041,8 +2056,8 @@ svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
* This works for now but any new side-effects of vcpu
* migration should take this case into account.
*/
- vcpustate->lastcpu = curcpu;
- vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
+ vcpu->lastcpu = curcpu;
+ vmm_stat_incr(vm, vcpuid, VCPU_MIGRATIONS, 1);
}
svm_msr_guest_enter(svm_sc, vcpu);
@@ -2062,32 +2077,32 @@ svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
if (vcpu_suspended(evinfo)) {
enable_gintr();
- vm_exit_suspended(vm, vcpu, state->rip);
+ vm_exit_suspended(vm, vcpuid, state->rip);
break;
}
if (vcpu_rendezvous_pending(evinfo)) {
enable_gintr();
- vm_exit_rendezvous(vm, vcpu, state->rip);
+ vm_exit_rendezvous(vm, vcpuid, state->rip);
break;
}
if (vcpu_reqidle(evinfo)) {
enable_gintr();
- vm_exit_reqidle(vm, vcpu, state->rip);
+ vm_exit_reqidle(vm, vcpuid, state->rip);
break;
}
/* We are asked to give the cpu by scheduler. */
- if (vcpu_should_yield(vm, vcpu)) {
+ if (vcpu_should_yield(vm, vcpuid)) {
enable_gintr();
- vm_exit_astpending(vm, vcpu, state->rip);
+ vm_exit_astpending(vm, vcpuid, state->rip);
break;
}
- if (vcpu_debugged(vm, vcpu)) {
+ if (vcpu_debugged(vm, vcpuid)) {
enable_gintr();
- vm_exit_debug(vm, vcpu, state->rip);
+ vm_exit_debug(vm, vcpuid, state->rip);
break;
}
@@ -2108,12 +2123,12 @@ svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
*/
svm_pmap_activate(svm_sc, vcpu, pmap);
- ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
- vcpustate->dirty = 0;
- VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
+ ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
+ vcpu->dirty = 0;
+ VCPU_CTR1(vm, vcpuid, "vmcb clean %#x", ctrl->vmcb_clean);
/* Launch Virtual Machine. */
- VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
+ VCPU_CTR1(vm, vcpuid, "Resume execution at %#lx", state->rip);
svm_dr_enter_guest(gctx);
svm_launch(vmcb_pa, gctx, get_pcpu());
svm_dr_leave_guest(gctx);
@@ -2134,7 +2149,7 @@ svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
enable_gintr();
/* Update 'nextrip' */
- vcpustate->nextrip = state->rip;
+ vcpu->nextrip = state->rip;
/* Handle #VMEXIT and if required return to user space. */
handled = svm_vmexit(svm_sc, vcpu, vmexit);
@@ -2146,17 +2161,19 @@ svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
}
static void
+svm_vcpu_cleanup(void *arg, void *vcpui)
+{
+ struct svm_vcpu *vcpu = vcpui;
+
+ free(vcpu->vmcb, M_SVM);
+ free(vcpu, M_SVM);
+}
+
+static void
svm_cleanup(void *arg)
{
struct svm_softc *sc = arg;
- struct svm_vcpu *vcpu;
- uint16_t i, maxcpus;
- maxcpus = vm_get_maxcpus(sc->vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = svm_get_vcpu(sc, i);
- free(vcpu->vmcb, M_SVM);
- }
contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
free(sc, M_SVM);
@@ -2209,12 +2226,14 @@ swctx_regptr(struct svm_regctx *regctx, int reg)
}
static int
-svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
+svm_getreg(void *arg, void *vcpui, int ident, uint64_t *val)
{
struct svm_softc *svm_sc;
+ struct svm_vcpu *vcpu;
register_t *reg;
svm_sc = arg;
+ vcpu = vcpui;
if (ident == VM_REG_GUEST_INTR_SHADOW) {
return (svm_get_intr_shadow(svm_sc, vcpu, val));
@@ -2224,24 +2243,27 @@ svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
return (0);
}
- reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
+ reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
if (reg != NULL) {
*val = *reg;
return (0);
}
- VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
+ VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_getreg: unknown register %#x",
+ ident);
return (EINVAL);
}
static int
-svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
+svm_setreg(void *arg, void *vcpui, int ident, uint64_t val)
{
struct svm_softc *svm_sc;
+ struct svm_vcpu *vcpu;
register_t *reg;
svm_sc = arg;
+ vcpu = vcpui;
if (ident == VM_REG_GUEST_INTR_SHADOW) {
return (svm_modify_intr_shadow(svm_sc, vcpu, val));
@@ -2254,7 +2276,7 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
}
}
- reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
+ reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
if (reg != NULL) {
*reg = val;
@@ -2272,32 +2294,33 @@ svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
* whether 'running' is true/false.
*/
- VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
+ VCPU_CTR1(svm_sc->vm, vcpu->vcpuid, "svm_setreg: unknown register %#x",
+ ident);
return (EINVAL);
}
static int
-svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+svm_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
{
- return (vmcb_getdesc(arg, vcpu, reg, desc));
+ return (vmcb_getdesc(arg, vcpui, reg, desc));
}
static int
-svm_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+svm_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
{
- return (vmcb_setdesc(arg, vcpu, reg, desc));
+ return (vmcb_setdesc(arg, vcpui, reg, desc));
}
#ifdef BHYVE_SNAPSHOT
static int
-svm_snapshot_reg(void *arg, int vcpu, int ident,
+svm_snapshot_reg(void *arg, void *vcpui, int ident,
struct vm_snapshot_meta *meta)
{
int ret;
uint64_t val;
if (meta->op == VM_SNAPSHOT_SAVE) {
- ret = svm_getreg(arg, vcpu, ident, &val);
+ ret = svm_getreg(arg, vcpui, ident, &val);
if (ret != 0)
goto done;
@@ -2305,7 +2328,7 @@ svm_snapshot_reg(void *arg, int vcpu, int ident,
} else if (meta->op == VM_SNAPSHOT_RESTORE) {
SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
- ret = svm_setreg(arg, vcpu, ident, val);
+ ret = svm_setreg(arg, vcpui, ident, val);
if (ret != 0)
goto done;
} else {
@@ -2319,13 +2342,15 @@ done:
#endif
static int
-svm_setcap(void *arg, int vcpu, int type, int val)
+svm_setcap(void *arg, void *vcpui, int type, int val)
{
struct svm_softc *sc;
+ struct svm_vcpu *vcpu;
struct vlapic *vlapic;
int error;
sc = arg;
+ vcpu = vcpui;
error = 0;
switch (type) {
case VM_CAP_HALT_EXIT:
@@ -2342,7 +2367,7 @@ svm_setcap(void *arg, int vcpu, int type, int val)
error = EINVAL;
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(sc->vm, vcpu);
+ vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
vlapic->ipi_exit = val;
break;
default:
@@ -2353,13 +2378,15 @@ svm_setcap(void *arg, int vcpu, int type, int val)
}
static int
-svm_getcap(void *arg, int vcpu, int type, int *retval)
+svm_getcap(void *arg, void *vcpui, int type, int *retval)
{
struct svm_softc *sc;
+ struct svm_vcpu *vcpu;
struct vlapic *vlapic;
int error;
sc = arg;
+ vcpu = vcpui;
error = 0;
switch (type) {
@@ -2375,7 +2402,7 @@ svm_getcap(void *arg, int vcpu, int type, int *retval)
*retval = 1; /* unrestricted guest is always enabled */
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(sc->vm, vcpu);
+ vlapic = vm_lapic(sc->vm, vcpu->vcpuid);
*retval = vlapic->ipi_exit;
break;
default:
@@ -2398,15 +2425,17 @@ svm_vmspace_free(struct vmspace *vmspace)
}
static struct vlapic *
-svm_vlapic_init(void *arg, int vcpuid)
+svm_vlapic_init(void *arg, void *vcpui)
{
struct svm_softc *svm_sc;
+ struct svm_vcpu *vcpu;
struct vlapic *vlapic;
svm_sc = arg;
+ vcpu = vcpui;
vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
vlapic->vm = svm_sc->vm;
- vlapic->vcpuid = vcpuid;
+ vlapic->vcpuid = vcpu->vcpuid;
vlapic->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_SVM_VLAPIC,
M_WAITOK | M_ZERO);
@@ -2435,163 +2464,163 @@ svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
}
static int
-svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpuid)
+svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
{
struct svm_softc *sc;
struct svm_vcpu *vcpu;
int err, running, hostcpu;
sc = (struct svm_softc *)arg;
- vcpu = &sc->vcpu[vcpuid];
+ vcpu = vcpui;
err = 0;
KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
- running = vcpu_is_running(sc->vm, vcpuid, &hostcpu);
+ running = vcpu_is_running(sc->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu) {
printf("%s: %s%d is running", __func__, vm_name(sc->vm),
- vcpuid);
+ vcpu->vcpuid);
return (EINVAL);
}
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_CR0, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_CR2, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_CR3, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_CR4, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_DR6, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_DR7, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR6, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_RAX, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_RSP, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_RIP, meta);
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_RFLAGS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta);
/* Guest segments */
/* ES */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_ES, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_ES, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta);
/* CS */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_CS, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_CS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta);
/* SS */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_SS, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_SS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta);
/* DS */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_DS, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_DS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta);
/* FS */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_FS, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_FS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta);
/* GS */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_GS, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_GS, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta);
/* TR */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_TR, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_TR, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta);
/* LDTR */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_LDTR, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_LDTR, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta);
/* EFER */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_EFER, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta);
/* IDTR and GDTR */
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_IDTR, meta);
- err += vmcb_snapshot_desc(sc, vcpuid, VM_REG_GUEST_GDTR, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta);
+ err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta);
/* Specific AMD registers */
- err += svm_snapshot_reg(sc, vcpuid, VM_REG_GUEST_INTR_SHADOW, meta);
+ err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_PAUSE_FILTHRESH, 2), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_PAUSE_FILCNT, 2), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_NP_ENABLE, 1), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_CPL, 1), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_STAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_LSTAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_CSTAR, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_SFMASK, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_KERNELGBASE, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_DBGCTL, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_BR_FROM, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_BR_TO, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_INT_FROM, 8), meta);
- err += vmcb_snapshot_any(sc, vcpuid,
+ err += vmcb_snapshot_any(sc, vcpu,
VMCB_ACCESS(VMCB_OFF_INT_TO, 8), meta);
if (err != 0)
goto done;
@@ -2633,17 +2662,18 @@ svm_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpuid)
/* Set all caches dirty */
if (meta->op == VM_SNAPSHOT_RESTORE)
- svm_set_dirty(sc, vcpuid, 0xffffffff);
+ svm_set_dirty(vcpu, 0xffffffff);
+
done:
return (err);
}
static int
-svm_restore_tsc(void *arg, int vcpu, uint64_t offset)
+svm_restore_tsc(void *arg, void *vcpui, uint64_t offset)
{
int err;
- err = svm_set_tsc_offset(arg, vcpu, offset);
+ err = svm_set_tsc_offset(arg, vcpui, offset);
return (err);
}
@@ -2656,6 +2686,8 @@ const struct vmm_ops vmm_ops_amd = {
.init = svm_init,
.run = svm_run,
.cleanup = svm_cleanup,
+ .vcpu_init = svm_vcpu_init,
+ .vcpu_cleanup = svm_vcpu_cleanup,
.getreg = svm_getreg,
.setreg = svm_setreg,
.getdesc = svm_getdesc,
diff --git a/sys/amd64/vmm/amd/svm.h b/sys/amd64/vmm/amd/svm.h
index 30e58b9e130f..26f4809203d7 100644
--- a/sys/amd64/vmm/amd/svm.h
+++ b/sys/amd64/vmm/amd/svm.h
@@ -33,6 +33,7 @@
struct pcpu;
struct svm_softc;
+struct svm_vcpu;
/*
* Guest register state that is saved outside the VMCB.
@@ -68,7 +69,8 @@ struct svm_regctx {
void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
#ifdef BHYVE_SNAPSHOT
-int svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset);
+int svm_set_tsc_offset(struct svm_softc *sc, struct svm_vcpu *vcpu,
+ uint64_t offset);
#endif
#endif /* _SVM_H_ */
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
index f0cea633a0cf..65cc23352620 100644
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -72,7 +72,7 @@ svm_msr_init(void)
}
void
-svm_msr_guest_init(struct svm_softc *sc, int vcpu)
+svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
/*
* All the MSRs accessible to the guest are either saved/restored by
@@ -86,7 +86,7 @@ svm_msr_guest_init(struct svm_softc *sc, int vcpu)
}
void
-svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
+svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
/*
* Save host MSRs (if any) and restore guest MSRs (if any).
@@ -94,7 +94,7 @@ svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
}
void
-svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
+svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
/*
* Save guest MSRs (if any) and restore host MSRs.
@@ -108,8 +108,8 @@ svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
}
int
-svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
- bool *retu)
+svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+ uint64_t *result, bool *retu)
{
int error = 0;
@@ -124,8 +124,8 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
- if (vm_rdmtrr(&sc->vcpu[vcpu].mtrr, num, result) != 0) {
- vm_inject_gp(sc->vm, vcpu);
+ if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
+ vm_inject_gp(sc->vm, vcpu->vcpuid);
}
break;
case MSR_SYSCFG:
@@ -142,7 +142,8 @@ svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
}
int
-svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
+svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num, uint64_t val,
+ bool *retu)
{
int error = 0;
@@ -156,8 +157,8 @@ svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
- if (vm_wrmtrr(&sc->vcpu[vcpu].mtrr, num, val) != 0) {
- vm_inject_gp(sc->vm, vcpu);
+ if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
+ vm_inject_gp(sc->vm, vcpu->vcpuid);
}
break;
case MSR_SYSCFG:
diff --git a/sys/amd64/vmm/amd/svm_msr.h b/sys/amd64/vmm/amd/svm_msr.h
index 1dba8101ab35..9e78b7f15ae8 100644
--- a/sys/amd64/vmm/amd/svm_msr.h
+++ b/sys/amd64/vmm/amd/svm_msr.h
@@ -32,15 +32,16 @@
#define _SVM_MSR_H_
struct svm_softc;
+struct svm_vcpu;
void svm_msr_init(void);
-void svm_msr_guest_init(struct svm_softc *sc, int vcpu);
-void svm_msr_guest_enter(struct svm_softc *sc, int vcpu);
-void svm_msr_guest_exit(struct svm_softc *sc, int vcpu);
+void svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_enter(struct svm_softc *sc, struct svm_vcpu *vcpu);
+void svm_msr_guest_exit(struct svm_softc *sc, struct svm_vcpu *vcpu);
-int svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
- bool *retu);
-int svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
- bool *retu);
+int svm_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+ uint64_t val, bool *retu);
+int svm_rdmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
+ uint64_t *result, bool *retu);
#endif /* _SVM_MSR_H_ */
diff --git a/sys/amd64/vmm/amd/svm_softc.h b/sys/amd64/vmm/amd/svm_softc.h
index b9e53ac9d4a0..e575b7c88de7 100644
--- a/sys/amd64/vmm/amd/svm_softc.h
+++ b/sys/amd64/vmm/amd/svm_softc.h
@@ -51,62 +51,52 @@ struct svm_vcpu {
long eptgen; /* pmap->pm_eptgen when the vcpu last ran */
struct asid asid;
struct vm_mtrr mtrr;
+ int vcpuid;
};
/*
* SVM softc, one per virtual machine.
*/
struct svm_softc {
- struct svm_vcpu vcpu[VM_MAXCPU];
	vm_offset_t nptp;			/* nested page table */
uint8_t *iopm_bitmap; /* shared by all vcpus */
uint8_t *msr_bitmap; /* shared by all vcpus */
struct vm *vm;
};
-static __inline struct svm_vcpu *
-svm_get_vcpu(struct svm_softc *sc, int vcpu)
-{
-
- return (&(sc->vcpu[vcpu]));
-}
-
static __inline struct vmcb *
-svm_get_vmcb(struct svm_softc *sc, int vcpu)
+svm_get_vmcb(struct svm_vcpu *vcpu)
{
- return ((sc->vcpu[vcpu].vmcb));
+ return (vcpu->vmcb);
}
static __inline struct vmcb_state *
-svm_get_vmcb_state(struct svm_softc *sc, int vcpu)
+svm_get_vmcb_state(struct svm_vcpu *vcpu)
{
- return (&(sc->vcpu[vcpu].vmcb->state));
+ return (&vcpu->vmcb->state);
}
static __inline struct vmcb_ctrl *
-svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu)
+svm_get_vmcb_ctrl(struct svm_vcpu *vcpu)
{
- return (&(sc->vcpu[vcpu].vmcb->ctrl));
+ return (&vcpu->vmcb->ctrl);
}
static __inline struct svm_regctx *
-svm_get_guest_regctx(struct svm_softc *sc, int vcpu)
+svm_get_guest_regctx(struct svm_vcpu *vcpu)
{
- return (&(sc->vcpu[vcpu].swctx));
+ return (&vcpu->swctx);
}
static __inline void
-svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits)
+svm_set_dirty(struct svm_vcpu *vcpu, uint32_t dirtybits)
{
- struct svm_vcpu *vcpustate;
-
- vcpustate = svm_get_vcpu(sc, vcpu);
- vcpustate->dirty |= dirtybits;
+ vcpu->dirty |= dirtybits;
}
#endif /* _SVM_SOFTC_H_ */
diff --git a/sys/amd64/vmm/amd/vmcb.c b/sys/amd64/vmm/amd/vmcb.c
index 69fe853ca843..566282781132 100644
--- a/sys/amd64/vmm/amd/vmcb.c
+++ b/sys/amd64/vmm/amd/vmcb.c
@@ -116,14 +116,14 @@ vmcb_segptr(struct vmcb *vmcb, int type)
}
static int
-vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
- uint64_t *val)
+vmcb_access(struct svm_softc *softc, struct svm_vcpu *vcpu, int write,
+ int ident, uint64_t *val)
{
struct vmcb *vmcb;
int off, bytes;
char *ptr;
- vmcb = svm_get_vmcb(softc, vcpu);
+ vmcb = svm_get_vmcb(vcpu);
off = VMCB_ACCESS_OFFSET(ident);
bytes = VMCB_ACCESS_BYTES(ident);
@@ -146,14 +146,14 @@ vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
memcpy(val, ptr + off, bytes);
break;
default:
- VCPU_CTR1(softc->vm, vcpu,
+ VCPU_CTR1(softc->vm, vcpu->vcpuid,
"Invalid size %d for VMCB access: %d", bytes);
return (EINVAL);
}
/* Invalidate all VMCB state cached by h/w. */
if (write)
- svm_set_dirty(softc, vcpu, 0xffffffff);
+ svm_set_dirty(vcpu, 0xffffffff);
return (0);
}
@@ -162,14 +162,15 @@ vmcb_access(struct svm_softc *softc, int vcpu, int write, int ident,
* Read from segment selector, control and general purpose register of VMCB.
*/
int
-vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
+vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t *retval)
{
struct vmcb *vmcb;
struct vmcb_state *state;
struct vmcb_segment *seg;
int err;
- vmcb = svm_get_vmcb(sc, vcpu);
+ vmcb = svm_get_vmcb(vcpu);
state = &vmcb->state;
err = 0;
@@ -252,14 +253,14 @@ vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval)
* Write to segment selector, control and general purpose register of VMCB.
*/
int
-vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
+vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident, uint64_t val)
{
struct vmcb *vmcb;
struct vmcb_state *state;
struct vmcb_segment *seg;
int err, dirtyseg;
- vmcb = svm_get_vmcb(sc, vcpu);
+ vmcb = svm_get_vmcb(vcpu);
state = &vmcb->state;
dirtyseg = 0;
err = 0;
@@ -270,38 +271,38 @@ vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
switch (ident) {
case VM_REG_GUEST_CR0:
state->cr0 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+ svm_set_dirty(vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_CR2:
state->cr2 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_CR2);
+ svm_set_dirty(vcpu, VMCB_CACHE_CR2);
break;
case VM_REG_GUEST_CR3:
state->cr3 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+ svm_set_dirty(vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_CR4:
state->cr4 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+ svm_set_dirty(vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_DR6:
state->dr6 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
+ svm_set_dirty(vcpu, VMCB_CACHE_DR);
break;
case VM_REG_GUEST_DR7:
state->dr7 = val;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_DR);
+ svm_set_dirty(vcpu, VMCB_CACHE_DR);
break;
case VM_REG_GUEST_EFER:
/* EFER_SVM must always be set when the guest is executing */
state->efer = val | EFER_SVM;
- svm_set_dirty(sc, vcpu, VMCB_CACHE_CR);
+ svm_set_dirty(vcpu, VMCB_CACHE_CR);
break;
case VM_REG_GUEST_RAX:
@@ -334,7 +335,7 @@ vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
__func__, ident));
seg->selector = val;
if (dirtyseg)
- svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+ svm_set_dirty(vcpu, VMCB_CACHE_SEG);
break;
case VM_REG_GUEST_GDTR:
@@ -365,15 +366,14 @@ vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg2)
}
int
-vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+ struct seg_desc *desc)
{
struct vmcb *vmcb;
- struct svm_softc *sc;
struct vmcb_segment *seg;
uint16_t attrib;
- sc = arg;
- vmcb = svm_get_vmcb(sc, vcpu);
+ vmcb = svm_get_vmcb(vcpu);
seg = vmcb_segptr(vmcb, reg);
KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
@@ -395,7 +395,7 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
seg->attrib = attrib;
}
- VCPU_CTR4(sc->vm, vcpu, "Setting desc %d: base (%#lx), limit (%#x), "
+ VCPU_CTR4(sc->vm, vcpu->vcpuid, "Setting desc %d: base (%#lx), limit (%#x), "
"attrib (%#x)", reg, seg->base, seg->limit, seg->attrib);
switch (reg) {
@@ -403,11 +403,11 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
case VM_REG_GUEST_DS:
case VM_REG_GUEST_ES:
case VM_REG_GUEST_SS:
- svm_set_dirty(sc, vcpu, VMCB_CACHE_SEG);
+ svm_set_dirty(vcpu, VMCB_CACHE_SEG);
break;
case VM_REG_GUEST_GDTR:
case VM_REG_GUEST_IDTR:
- svm_set_dirty(sc, vcpu, VMCB_CACHE_DT);
+ svm_set_dirty(vcpu, VMCB_CACHE_DT);
break;
default:
break;
@@ -417,14 +417,13 @@ vmcb_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
}
int
-vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+ struct seg_desc *desc)
{
struct vmcb *vmcb;
- struct svm_softc *sc;
struct vmcb_segment *seg;
- sc = arg;
- vmcb = svm_get_vmcb(sc, vcpu);
+ vmcb = svm_get_vmcb(vcpu);
seg = vmcb_segptr(vmcb, reg);
KASSERT(seg != NULL, ("%s: invalid segment descriptor %d",
__func__, reg));
@@ -459,15 +458,11 @@ vmcb_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
#ifdef BHYVE_SNAPSHOT
int
-vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val)
+vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t *val)
{
int error = 0;
- if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm)) {
- error = EINVAL;
- goto err;
- }
-
if (ident >= VM_REG_LAST) {
error = EINVAL;
goto err;
@@ -480,15 +475,11 @@ err:
}
int
-vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val)
+vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t val)
{
int error = 0;
- if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm)) {
- error = EINVAL;
- goto err;
- }
-
if (ident >= VM_REG_LAST) {
error = EINVAL;
goto err;
@@ -501,13 +492,14 @@ err:
}
int
-vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
+vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
+ struct vm_snapshot_meta *meta)
{
int ret;
struct seg_desc desc;
if (meta->op == VM_SNAPSHOT_SAVE) {
- ret = vmcb_getdesc(arg, vcpu, reg, &desc);
+ ret = vmcb_getdesc(sc, vcpu, reg, &desc);
if (ret != 0)
goto done;
@@ -519,7 +511,7 @@ vmcb_snapshot_desc(void *arg, int vcpu, int reg, struct vm_snapshot_meta *meta)
SNAPSHOT_VAR_OR_LEAVE(desc.limit, meta, ret, done);
SNAPSHOT_VAR_OR_LEAVE(desc.access, meta, ret, done);
- ret = vmcb_setdesc(arg, vcpu, reg, &desc);
+ ret = vmcb_setdesc(sc, vcpu, reg, &desc);
if (ret != 0)
goto done;
} else {
@@ -532,7 +524,7 @@ done:
}
int
-vmcb_snapshot_any(struct svm_softc *sc, int vcpu, int ident,
+vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
struct vm_snapshot_meta *meta)
{
int ret;
diff --git a/sys/amd64/vmm/amd/vmcb.h b/sys/amd64/vmm/amd/vmcb.h
index 084f4465cb49..b5815e36fca1 100644
--- a/sys/amd64/vmm/amd/vmcb.h
+++ b/sys/amd64/vmm/amd/vmcb.h
@@ -234,6 +234,7 @@
#ifdef _KERNEL
struct svm_softc;
+struct svm_vcpu;
struct vm_snapshot_meta;
/* VMCB save state area segment format */
@@ -353,17 +354,23 @@ struct vmcb {
CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);
-int vmcb_read(struct svm_softc *sc, int vcpu, int ident, uint64_t *retval);
-int vmcb_write(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
-int vmcb_setdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
-int vmcb_getdesc(void *arg, int vcpu, int ident, struct seg_desc *desc);
+int vmcb_read(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t *retval);
+int vmcb_write(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t val);
+int vmcb_setdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ struct seg_desc *desc);
+int vmcb_getdesc(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ struct seg_desc *desc);
int vmcb_seg(struct vmcb *vmcb, int ident, struct vmcb_segment *seg);
#ifdef BHYVE_SNAPSHOT
-int vmcb_getany(struct svm_softc *sc, int vcpu, int ident, uint64_t *val);
-int vmcb_setany(struct svm_softc *sc, int vcpu, int ident, uint64_t val);
-int vmcb_snapshot_desc(void *arg, int vcpu, int reg,
+int vmcb_getany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t *val);
+int vmcb_setany(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
+ uint64_t val);
+int vmcb_snapshot_desc(struct svm_softc *sc, struct svm_vcpu *vcpu, int reg,
struct vm_snapshot_meta *meta);
-int vmcb_snapshot_any(struct svm_softc *sc, int vcpu, int ident,
+int vmcb_snapshot_any(struct svm_softc *sc, struct svm_vcpu *vcpu, int ident,
struct vm_snapshot_meta *meta);
#endif
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 96e7907622cf..22cc0f9dca9c 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -310,12 +310,12 @@ SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
*/
#define APIC_ACCESS_ADDRESS 0xFFFFF000
-static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
-static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
+static int vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc);
+static int vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval);
static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
static void vmx_inject_pir(struct vlapic *vlapic);
#ifdef BHYVE_SNAPSHOT
-static int vmx_restore_tsc(void *arg, int vcpu, uint64_t now);
+static int vmx_restore_tsc(void *arg, void *vcpui, uint64_t now);
#endif
static inline bool
@@ -1033,13 +1033,9 @@ vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
static void *
vmx_init(struct vm *vm, pmap_t pmap)
{
- int i, error;
+ int error;
struct vmx *vmx;
- struct vmcs *vmcs;
- struct vmx_vcpu *vcpu;
- uint32_t exc_bitmap;
uint16_t maxcpus = vm_get_maxcpus(vm);
- uint16_t vpid[maxcpus];
vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
vmx->vm = vm;
@@ -1100,7 +1096,7 @@ vmx_init(struct vm *vm, pmap_t pmap)
((cap_rdpid || cap_rdtscp) && guest_msr_ro(vmx, MSR_TSC_AUX)))
panic("vmx_init: error setting guest msr access");
- vpid_alloc(vpid, maxcpus);
+ vpid_alloc(vmx->vpids, maxcpus);
if (virtual_interrupt_delivery) {
error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
@@ -1109,113 +1105,122 @@ vmx_init(struct vm *vm, pmap_t pmap)
KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
}
- for (i = 0; i < maxcpus; i++) {
- vcpu = &vmx->vcpus[i];
-
- vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX,
- M_WAITOK | M_ZERO);
- vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX,
- M_WAITOK | M_ZERO);
- vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64,
- M_VMX, M_WAITOK | M_ZERO);
-
- vmcs = vcpu->vmcs;
- vmcs->identifier = vmx_revision();
- error = vmclear(vmcs);
- if (error != 0) {
- panic("vmx_init: vmclear error %d on vcpu %d\n",
- error, i);
- }
+ vmx->pmap = pmap;
+ return (vmx);
+}
- vmx_msr_guest_init(vmx, i);
+static void *
+vmx_vcpu_init(void *arg, int vcpuid)
+{
+ struct vmx *vmx = arg;
+ struct vmcs *vmcs;
+ struct vmx_vcpu *vcpu;
+ uint32_t exc_bitmap;
+ int error;
- error = vmcs_init(vmcs);
- KASSERT(error == 0, ("vmcs_init error %d", error));
+ vcpu = malloc(sizeof(*vcpu), M_VMX, M_WAITOK | M_ZERO);
+ vcpu->vcpuid = vcpuid;
+ vcpu->vmcs = malloc_aligned(sizeof(*vmcs), PAGE_SIZE, M_VMX,
+ M_WAITOK | M_ZERO);
+ vcpu->apic_page = malloc_aligned(PAGE_SIZE, PAGE_SIZE, M_VMX,
+ M_WAITOK | M_ZERO);
+ vcpu->pir_desc = malloc_aligned(sizeof(*vcpu->pir_desc), 64, M_VMX,
+ M_WAITOK | M_ZERO);
- VMPTRLD(vmcs);
- error = 0;
- error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx);
- error += vmwrite(VMCS_EPTP, vmx->eptp);
- error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
- error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
- if (vcpu_trap_wbinvd(vm, i)) {
- KASSERT(cap_wbinvd_exit, ("WBINVD trap not available"));
- procbased_ctls2 |= PROCBASED2_WBINVD_EXITING;
- }
- error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
- error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
- error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
- error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
- error += vmwrite(VMCS_VPID, vpid[i]);
-
- if (guest_l1d_flush && !guest_l1d_flush_sw) {
- vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
- (vm_offset_t)&msr_load_list[0]));
- vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
- nitems(msr_load_list));
- vmcs_write(VMCS_EXIT_MSR_STORE, 0);
- vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
- }
+ vmcs = vcpu->vmcs;
+ vmcs->identifier = vmx_revision();
+ error = vmclear(vmcs);
+ if (error != 0) {
+ panic("vmx_init: vmclear error %d on vcpu %d\n",
+ error, vcpuid);
+ }
- /* exception bitmap */
- if (vcpu_trace_exceptions(vm, i))
- exc_bitmap = 0xffffffff;
- else
- exc_bitmap = 1 << IDT_MC;
- error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
+ vmx_msr_guest_init(vmx, vcpu);
- vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1;
- error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
+ error = vmcs_init(vmcs);
+ KASSERT(error == 0, ("vmcs_init error %d", error));
- if (tpr_shadowing) {
- error += vmwrite(VMCS_VIRTUAL_APIC,
- vtophys(vcpu->apic_page));
- }
+ VMPTRLD(vmcs);
+ error = 0;
+ error += vmwrite(VMCS_HOST_RSP, (u_long)&vcpu->ctx);
+ error += vmwrite(VMCS_EPTP, vmx->eptp);
+ error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
+ error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
+ if (vcpu_trap_wbinvd(vmx->vm, vcpuid)) {
+ KASSERT(cap_wbinvd_exit, ("WBINVD trap not available"));
+ procbased_ctls2 |= PROCBASED2_WBINVD_EXITING;
+ }
+ error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
+ error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
+ error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
+ error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
+ error += vmwrite(VMCS_VPID, vmx->vpids[vcpuid]);
+
+ if (guest_l1d_flush && !guest_l1d_flush_sw) {
+ vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
+ (vm_offset_t)&msr_load_list[0]));
+ vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
+ nitems(msr_load_list));
+ vmcs_write(VMCS_EXIT_MSR_STORE, 0);
+ vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
+ }
- if (virtual_interrupt_delivery) {
- error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
- error += vmwrite(VMCS_EOI_EXIT0, 0);
- error += vmwrite(VMCS_EOI_EXIT1, 0);
- error += vmwrite(VMCS_EOI_EXIT2, 0);
- error += vmwrite(VMCS_EOI_EXIT3, 0);
- }
- if (posted_interrupts) {
- error += vmwrite(VMCS_PIR_VECTOR, pirvec);
- error += vmwrite(VMCS_PIR_DESC,
- vtophys(vcpu->pir_desc));
- }
- VMCLEAR(vmcs);
- KASSERT(error == 0, ("vmx_init: error customizing the vmcs"));
+ /* exception bitmap */
+ if (vcpu_trace_exceptions(vmx->vm, vcpuid))
+ exc_bitmap = 0xffffffff;
+ else
+ exc_bitmap = 1 << IDT_MC;
+ error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
- vcpu->cap.set = 0;
- vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0;
- vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0;
- vcpu->cap.proc_ctls = procbased_ctls;
- vcpu->cap.proc_ctls2 = procbased_ctls2;
- vcpu->cap.exc_bitmap = exc_bitmap;
+ vcpu->ctx.guest_dr6 = DBREG_DR6_RESERVED1;
+ error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
- vcpu->state.nextrip = ~0;
- vcpu->state.lastcpu = NOCPU;
- vcpu->state.vpid = vpid[i];
+ if (tpr_shadowing) {
+ error += vmwrite(VMCS_VIRTUAL_APIC, vtophys(vcpu->apic_page));
+ }
- /*
- * Set up the CR0/4 shadows, and init the read shadow
- * to the power-on register value from the Intel Sys Arch.
- * CR0 - 0x60000010
- * CR4 - 0
- */
- error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
- if (error != 0)
- panic("vmx_setup_cr0_shadow %d", error);
+ if (virtual_interrupt_delivery) {
+ error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
+ error += vmwrite(VMCS_EOI_EXIT0, 0);
+ error += vmwrite(VMCS_EOI_EXIT1, 0);
+ error += vmwrite(VMCS_EOI_EXIT2, 0);
+ error += vmwrite(VMCS_EOI_EXIT3, 0);
+ }
+ if (posted_interrupts) {
+ error += vmwrite(VMCS_PIR_VECTOR, pirvec);
+ error += vmwrite(VMCS_PIR_DESC, vtophys(vcpu->pir_desc));
+ }
+ VMCLEAR(vmcs);
+ KASSERT(error == 0, ("vmx_init: error customizing the vmcs"));
- error = vmx_setup_cr4_shadow(vmcs, 0);
- if (error != 0)
- panic("vmx_setup_cr4_shadow %d", error);
+ vcpu->cap.set = 0;
+ vcpu->cap.set |= cap_rdpid != 0 ? 1 << VM_CAP_RDPID : 0;
+ vcpu->cap.set |= cap_rdtscp != 0 ? 1 << VM_CAP_RDTSCP : 0;
+ vcpu->cap.proc_ctls = procbased_ctls;
+ vcpu->cap.proc_ctls2 = procbased_ctls2;
+ vcpu->cap.exc_bitmap = exc_bitmap;
- vcpu->ctx.pmap = pmap;
- }
+ vcpu->state.nextrip = ~0;
+ vcpu->state.lastcpu = NOCPU;
+ vcpu->state.vpid = vmx->vpids[vcpuid];
- return (vmx);
+ /*
+ * Set up the CR0/4 shadows, and init the read shadow
+ * to the power-on register value from the Intel Sys Arch.
+ * CR0 - 0x60000010
+ * CR4 - 0
+ */
+ error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
+ if (error != 0)
+ panic("vmx_setup_cr0_shadow %d", error);
+
+ error = vmx_setup_cr4_shadow(vmcs, 0);
+ if (error != 0)
+ panic("vmx_setup_cr4_shadow %d", error);
+
+ vcpu->ctx.pmap = vmx->pmap;
+
+ return (vcpu);
}
static int
@@ -1230,29 +1235,30 @@ vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
}
static __inline void
-vmx_run_trace(struct vmx *vmx, int vcpu)
+vmx_run_trace(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
+ VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Resume execution at %#lx",
+ vmcs_guest_rip());
#endif
}
static __inline void
-vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
- int handled)
+vmx_exit_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip,
+ uint32_t exit_reason, int handled)
{
#ifdef KTR
- VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
+ VCPU_CTR3(vmx->vm, vcpu->vcpuid, "%s %s vmexit at 0x%0lx",
handled ? "handled" : "unhandled",
exit_reason_to_str(exit_reason), rip);
#endif
}
static __inline void
-vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
+vmx_astpending_trace(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t rip)
{
#ifdef KTR
- VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
+ VCPU_CTR1(vmx->vm, vcpu->vcpuid, "astpending vmexit at 0x%0lx", rip);
#endif
}
@@ -1263,12 +1269,12 @@ static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
* Invalidate guest mappings identified by its vpid from the TLB.
*/
static __inline void
-vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
+vmx_invvpid(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap, int running)
{
struct vmxstate *vmxstate;
struct invvpid_desc invvpid_desc;
- vmxstate = &vmx->vcpus[vcpu].state;
+ vmxstate = &vcpu->state;
if (vmxstate->vpid == 0)
return;
@@ -1284,7 +1290,7 @@ vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
}
KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
- "critical section", __func__, vcpu));
+ "critical section", __func__, vcpu->vcpuid));
/*
* Invalidate all mappings tagged with 'vpid'
@@ -1307,7 +1313,7 @@ vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
invvpid_desc.vpid = vmxstate->vpid;
invvpid_desc.linear_addr = 0;
invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
- vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
+ vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_DONE, 1);
} else {
/*
* The invvpid can be skipped if an invept is going to
@@ -1315,22 +1321,22 @@ vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
* will invalidate combined mappings tagged with
* 'vmx->eptp' for all vpids.
*/
- vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
+ vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_SAVED, 1);
}
}
static void
-vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
+vmx_set_pcpu_defaults(struct vmx *vmx, struct vmx_vcpu *vcpu, pmap_t pmap)
{
struct vmxstate *vmxstate;
- vmxstate = &vmx->vcpus[vcpu].state;
+ vmxstate = &vcpu->state;
if (vmxstate->lastcpu == curcpu)
return;
vmxstate->lastcpu = curcpu;
- vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
+ vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_MIGRATIONS, 1);
vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
@@ -1344,69 +1350,65 @@ vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
static void __inline
-vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
+vmx_set_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
- if ((vmx_vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
- vmx_vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
- vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx_vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
+ if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
+ vcpu->cap.proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
+ vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid,
+ "Enabling interrupt window exiting");
}
}
static void __inline
-vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
+vmx_clear_int_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
- KASSERT((vmx_vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
- ("intr_window_exiting not set: %#x", vmx_vcpu->cap.proc_ctls));
- vmx_vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
- vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx_vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
+ KASSERT((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
+ ("intr_window_exiting not set: %#x", vcpu->cap.proc_ctls));
+ vcpu->cap.proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
+ vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling interrupt window exiting");
}
static void __inline
-vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
+vmx_set_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
- if ((vmx_vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
- vmx_vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
- vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx_vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
+ if ((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
+ vcpu->cap.proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
+ vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling NMI window exiting");
}
}
static void __inline
-vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
+vmx_clear_nmi_window_exiting(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
- KASSERT((vmx_vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
- ("nmi_window_exiting not set %#x", vmx_vcpu->cap.proc_ctls));
- vmx_vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
- vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx_vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
+ KASSERT((vcpu->cap.proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
+ ("nmi_window_exiting not set %#x", vcpu->cap.proc_ctls));
+ vcpu->cap.proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
+ vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Disabling NMI window exiting");
}
int
-vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
+vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu, uint64_t offset)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
int error;
- if ((vmx_vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
- vmx_vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET;
- vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx_vcpu->cap.proc_ctls);
- VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
+ if ((vcpu->cap.proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
+ vcpu->cap.proc_ctls |= PROCBASED_TSC_OFFSET;
+ vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vcpu->cap.proc_ctls);
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Enabling TSC offsetting");
}
error = vmwrite(VMCS_TSC_OFFSET, offset);
#ifdef BHYVE_SNAPSHOT
if (error == 0)
- error = vm_set_tsc_offset(vmx->vm, vcpu, offset);
+ error = vm_set_tsc_offset(vmx->vm, vcpu->vcpuid, offset);
#endif
return (error);
}
@@ -1417,7 +1419,7 @@ vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
static void
-vmx_inject_nmi(struct vmx *vmx, int vcpu)
+vmx_inject_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
uint32_t gi __diagused, info;
@@ -1436,33 +1438,32 @@ vmx_inject_nmi(struct vmx *vmx, int vcpu)
info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
vmcs_write(VMCS_ENTRY_INTR_INFO, info);
- VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Injecting vNMI");
/* Clear the request */
- vm_nmi_clear(vmx->vm, vcpu);
+ vm_nmi_clear(vmx->vm, vcpu->vcpuid);
}
static void
-vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
- uint64_t guestrip)
+vmx_inject_interrupts(struct vmx *vmx, struct vmx_vcpu *vcpu,
+ struct vlapic *vlapic, uint64_t guestrip)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
int vector, need_nmi_exiting, extint_pending;
uint64_t rflags, entryinfo;
uint32_t gi, info;
- if (vmx_vcpu->state.nextrip != guestrip) {
+ if (vcpu->state.nextrip != guestrip) {
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
if (gi & HWINTR_BLOCKING) {
-			VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
+			VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Guest interrupt blocking "
"cleared due to rip change: %#lx/%#lx",
- vmx_vcpu->state.nextrip, guestrip);
+ vcpu->state.nextrip, guestrip);
gi &= ~HWINTR_BLOCKING;
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
}
- if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
+ if (vm_entry_intinfo(vmx->vm, vcpu->vcpuid, &entryinfo)) {
KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
"intinfo is not valid: %#lx", __func__, entryinfo));
@@ -1487,7 +1488,7 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
vmcs_write(VMCS_ENTRY_INTR_INFO, info);
}
- if (vm_nmi_pending(vmx->vm, vcpu)) {
+ if (vm_nmi_pending(vmx->vm, vcpu->vcpuid)) {
/*
* If there are no conditions blocking NMI injection then
* inject it directly here otherwise enable "NMI window
@@ -1507,19 +1508,20 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
vmx_inject_nmi(vmx, vcpu);
need_nmi_exiting = 0;
} else {
- VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
- "due to VM-entry intr info %#x", info);
+ VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot "
+ "inject NMI due to VM-entry intr info %#x",
+ info);
}
} else {
- VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
- "Guest Interruptibility-state %#x", gi);
+ VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Cannot inject NMI "
+ "due to Guest Interruptibility-state %#x", gi);
}
if (need_nmi_exiting)
vmx_set_nmi_window_exiting(vmx, vcpu);
}
- extint_pending = vm_extint_pending(vmx->vm, vcpu);
+ extint_pending = vm_extint_pending(vmx->vm, vcpu->vcpuid);
if (!extint_pending && virtual_interrupt_delivery) {
vmx_inject_pir(vlapic);
@@ -1531,9 +1533,9 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
* checking for pending interrupts. This is just an optimization and
* not needed for correctness.
*/
- if ((vmx_vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
- VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
- "pending int_window_exiting");
+ if ((vcpu->cap.proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Skip interrupt injection "
+ "due to pending int_window_exiting");
return;
}
@@ -1567,15 +1569,15 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
/* Check RFLAGS.IF and the interruptibility state of the guest */
rflags = vmcs_read(VMCS_GUEST_RFLAGS);
if ((rflags & PSL_I) == 0) {
- VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
- "rflags %#lx", vector, rflags);
+ VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
+ "to rflags %#lx", vector, rflags);
goto cantinject;
}
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
if (gi & HWINTR_BLOCKING) {
- VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
- "Guest Interruptibility-state %#x", vector, gi);
+ VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
+ "to Guest Interruptibility-state %#x", vector, gi);
goto cantinject;
}
@@ -1588,8 +1590,8 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
* - An exception was injected above.
* - An NMI was injected above or after "NMI window exiting"
*/
- VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
- "VM-entry intr info %#x", vector, info);
+ VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Cannot inject vector %d due "
+ "to VM-entry intr info %#x", vector, info);
goto cantinject;
}
@@ -1602,7 +1604,7 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
/* Update the Local APIC ISR */
vlapic_intr_accepted(vlapic, vector);
} else {
- vm_extint_clear(vmx->vm, vcpu);
+ vm_extint_clear(vmx->vm, vcpu->vcpuid);
vatpic_intr_accepted(vmx->vm, vector);
/*
@@ -1619,7 +1621,8 @@ vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
vmx_set_int_window_exiting(vmx, vcpu);
}
- VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
+ VCPU_CTR1(vmx->vm, vcpu->vcpuid, "Injecting hwintr at vector %d",
+ vector);
return;
@@ -1641,29 +1644,29 @@ cantinject:
* hypervisor needs to restore virtual-NMI blocking before resuming the guest.
*/
static void
-vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
+vmx_restore_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
uint32_t gi;
- VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Restore Virtual-NMI blocking");
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static void
-vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
+vmx_clear_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
uint32_t gi;
- VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Clear Virtual-NMI blocking");
gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static void
-vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
+vmx_assert_nmi_blocking(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
uint32_t gi __diagused;
@@ -1673,13 +1676,14 @@ vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
}
static int
-vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
+vmx_emulate_xsetbv(struct vmx *vmx, struct vmx_vcpu *vcpu,
+ struct vm_exit *vmexit)
{
struct vmxctx *vmxctx;
uint64_t xcrval;
const struct xsave_limits *limits;
- vmxctx = &vmx->vcpus[vcpu].ctx;
+ vmxctx = &vcpu->ctx;
limits = vmm_get_xsave_limits();
/*
@@ -1690,31 +1694,31 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
/* Only xcr0 is supported. */
if (vmxctx->guest_rcx != 0) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
- vm_inject_ud(vmx->vm, vcpu);
+ vm_inject_ud(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
if ((xcrval & ~limits->xcr0_allowed) != 0) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
if (!(xcrval & XFEATURE_ENABLED_X87)) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
/* AVX (YMM_Hi128) requires SSE. */
if (xcrval & XFEATURE_ENABLED_AVX &&
(xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
@@ -1725,7 +1729,7 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
if (xcrval & XFEATURE_AVX512 &&
(xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
(XFEATURE_AVX512 | XFEATURE_AVX)) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
@@ -1735,7 +1739,7 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
*/
if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
- vm_inject_gp(vmx->vm, vcpu);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
return (HANDLED);
}
@@ -1749,11 +1753,11 @@ vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
static uint64_t
-vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
+vmx_get_guest_reg(struct vmx_vcpu *vcpu, int ident)
{
const struct vmxctx *vmxctx;
- vmxctx = &vmx->vcpus[vcpu].ctx;
+ vmxctx = &vcpu->ctx;
switch (ident) {
case 0:
@@ -1794,11 +1798,11 @@ vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
}
static void
-vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
+vmx_set_guest_reg(struct vmx_vcpu *vcpu, int ident, uint64_t regval)
{
struct vmxctx *vmxctx;
- vmxctx = &vmx->vcpus[vcpu].ctx;
+ vmxctx = &vcpu->ctx;
switch (ident) {
case 0:
@@ -1855,7 +1859,7 @@ vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
}
static int
-vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+vmx_emulate_cr0_access(struct vmx_vcpu *vcpu, uint64_t exitqual)
{
uint64_t crval, regval;
@@ -1863,7 +1867,7 @@ vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
if ((exitqual & 0xf0) != 0x00)
return (UNHANDLED);
- regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+ regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf);
vmcs_write(VMCS_CR0_SHADOW, regval);
@@ -1893,7 +1897,7 @@ vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
}
static int
-vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+vmx_emulate_cr4_access(struct vmx_vcpu *vcpu, uint64_t exitqual)
{
uint64_t crval, regval;
@@ -1901,7 +1905,7 @@ vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
if ((exitqual & 0xf0) != 0x00)
return (UNHANDLED);
- regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
+ regval = vmx_get_guest_reg(vcpu, (exitqual >> 8) & 0xf);
vmcs_write(VMCS_CR4_SHADOW, regval);
@@ -1913,7 +1917,8 @@ vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
}
static int
-vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
+vmx_emulate_cr8_access(struct vmx *vmx, struct vmx_vcpu *vcpu,
+ uint64_t exitqual)
{
struct vlapic *vlapic;
uint64_t cr8;
@@ -1924,13 +1929,13 @@ vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
return (UNHANDLED);
}
- vlapic = vm_lapic(vmx->vm, vcpu);
+ vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
regnum = (exitqual >> 8) & 0xf;
if (exitqual & 0x10) {
cr8 = vlapic_get_cr8(vlapic);
- vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
+ vmx_set_guest_reg(vcpu, regnum, cr8);
} else {
- cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
+ cr8 = vmx_get_guest_reg(vcpu, regnum);
vlapic_set_cr8(vlapic, cr8);
}
@@ -1986,26 +1991,26 @@ vmx_paging_mode(void)
}
static uint64_t
-inout_str_index(struct vmx *vmx, int vcpuid, int in)
+inout_str_index(struct vmx *vmx, struct vmx_vcpu *vcpu, int in)
{
uint64_t val;
int error __diagused;
enum vm_reg_name reg;
reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
- error = vmx_getreg(vmx, vcpuid, reg, &val);
+ error = vmx_getreg(vmx, vcpu, reg, &val);
KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
return (val);
}
static uint64_t
-inout_str_count(struct vmx *vmx, int vcpuid, int rep)
+inout_str_count(struct vmx *vmx, struct vmx_vcpu *vcpu, int rep)
{
uint64_t val;
int error __diagused;
if (rep) {
- error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
+ error = vmx_getreg(vmx, vcpu, VM_REG_GUEST_RCX, &val);
KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
} else {
val = 1;
@@ -2032,8 +2037,8 @@ inout_str_addrsize(uint32_t inst_info)
}
static void
-inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
- struct vm_inout_str *vis)
+inout_str_seginfo(struct vmx *vmx, struct vmx_vcpu *vcpu, uint32_t inst_info,
+ int in, struct vm_inout_str *vis)
{
int error __diagused, s;
@@ -2044,7 +2049,7 @@ inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
vis->seg_name = vm_segment_name(s);
}
- error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
+ error = vmx_getdesc(vmx, vcpu, vis->seg_name, &vis->seg_desc);
KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
}
@@ -2133,25 +2138,25 @@ ept_emulation_fault(uint64_t ept_qual)
}
static __inline int
-apic_access_virtualization(struct vmx *vmx, int vcpuid)
+apic_access_virtualization(struct vmx_vcpu *vcpu)
{
uint32_t proc_ctls2;
- proc_ctls2 = vmx->vcpus[vcpuid].cap.proc_ctls2;
+ proc_ctls2 = vcpu->cap.proc_ctls2;
return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
}
static __inline int
-x2apic_virtualization(struct vmx *vmx, int vcpuid)
+x2apic_virtualization(struct vmx_vcpu *vcpu)
{
uint32_t proc_ctls2;
- proc_ctls2 = vmx->vcpus[vcpuid].cap.proc_ctls2;
+ proc_ctls2 = vcpu->cap.proc_ctls2;
return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
}
static int
-vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
+vmx_handle_apic_write(struct vmx_vcpu *vcpu, struct vlapic *vlapic,
uint64_t qual)
{
int error, handled, offset;
@@ -2161,7 +2166,7 @@ vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
handled = HANDLED;
offset = APIC_WRITE_OFFSET(qual);
- if (!apic_access_virtualization(vmx, vcpuid)) {
+ if (!apic_access_virtualization(vcpu)) {
/*
* In general there should not be any APIC write VM-exits
* unless APIC-access virtualization is enabled.
@@ -2169,7 +2174,7 @@ vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
* However self-IPI virtualization can legitimately trigger
* an APIC-write VM-exit so treat it specially.
*/
- if (x2apic_virtualization(vmx, vcpuid) &&
+ if (x2apic_virtualization(vcpu) &&
offset == APIC_OFFSET_SELF_IPI) {
apic_regs = (uint32_t *)(vlapic->apic_page);
vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
@@ -2219,10 +2224,10 @@ vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
}
static bool
-apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
+apic_access_fault(struct vmx_vcpu *vcpu, uint64_t gpa)
{
- if (apic_access_virtualization(vmx, vcpuid) &&
+ if (apic_access_virtualization(vcpu) &&
(gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
return (true);
else
@@ -2230,12 +2235,12 @@ apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
}
static int
-vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
+vmx_handle_apic_access(struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
uint64_t qual;
int access_type, offset, allowed;
- if (!apic_access_virtualization(vmx, vcpuid))
+ if (!apic_access_virtualization(vcpu))
return (UNHANDLED);
qual = vmexit->u.vmx.exit_qualification;
@@ -2316,20 +2321,21 @@ vmx_task_switch_reason(uint64_t qual)
}
static int
-emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+emulate_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
+ bool *retu)
{
int error;
if (lapic_msr(num))
- error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
+ error = lapic_wrmsr(vmx->vm, vcpu->vcpuid, num, val, retu);
else
- error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
+ error = vmx_wrmsr(vmx, vcpu, num, val, retu);
return (error);
}
static int
-emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
+emulate_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, bool *retu)
{
struct vmxctx *vmxctx;
uint64_t result;
@@ -2337,13 +2343,13 @@ emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
int error;
if (lapic_msr(num))
- error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
+ error = lapic_rdmsr(vmx->vm, vcpu->vcpuid, num, &result, retu);
else
- error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
+ error = vmx_rdmsr(vmx, vcpu, num, &result, retu);
if (error == 0) {
eax = result;
- vmxctx = &vmx->vcpus[vcpuid].ctx;
+ vmxctx = &vcpu->ctx;
error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
@@ -2356,10 +2362,9 @@ emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
}
static int
-vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
+vmx_exit_process(struct vmx *vmx, struct vmx_vcpu *vcpu, struct vm_exit *vmexit)
{
int error, errcode, errcode_valid, handled, in;
- struct vmx_vcpu *vmx_vcpu;
struct vmxctx *vmxctx;
struct vlapic *vlapic;
struct vm_inout_str *vis;
@@ -2367,21 +2372,22 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
uint32_t intr_type, intr_vec, reason;
uint64_t exitintinfo, qual, gpa;
+ int vcpuid;
bool retu;
CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
handled = UNHANDLED;
- vmx_vcpu = &vmx->vcpus[vcpu];
- vmxctx = &vmx_vcpu->ctx;
+ vmxctx = &vcpu->ctx;
+ vcpuid = vcpu->vcpuid;
qual = vmexit->u.vmx.exit_qualification;
reason = vmexit->u.vmx.exit_reason;
vmexit->exitcode = VM_EXITCODE_BOGUS;
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
- SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_COUNT, 1);
+ SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit);
/*
* VM-entry failures during or after loading guest state.
@@ -2390,7 +2396,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* as most VM-exit fields are not populated as usual.
*/
if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
- VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
+ VCPU_CTR0(vmx->vm, vcpuid, "Handling MCE during VM-entry");
__asm __volatile("int $18");
return (1);
}
@@ -2411,7 +2417,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
idtvec_err = vmcs_idt_vectoring_err();
exitintinfo |= (uint64_t)idtvec_err << 32;
}
- error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
+ error = vm_exit_intinfo(vmx->vm, vcpuid, exitintinfo);
KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
__func__, error));
@@ -2484,21 +2490,21 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
}
vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
- SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
- VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
+ SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpuid, vmexit, ts);
+ VCPU_CTR4(vmx->vm, vcpuid, "task switch reason %d, tss 0x%04x, "
"%s errcode 0x%016lx", ts->reason, ts->tsssel,
ts->ext ? "external" : "internal",
((uint64_t)ts->errcode << 32) | ts->errcode_valid);
break;
case EXIT_REASON_CR_ACCESS:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
- SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CR_ACCESS, 1);
+ SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual);
switch (qual & 0xf) {
case 0:
- handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
+ handled = vmx_emulate_cr0_access(vcpu, qual);
break;
case 4:
- handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
+ handled = vmx_emulate_cr4_access(vcpu, qual);
break;
case 8:
handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
@@ -2506,11 +2512,11 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
break;
case EXIT_REASON_RDMSR:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1);
retu = false;
ecx = vmxctx->guest_rcx;
- VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
- SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx);
+ VCPU_CTR1(vmx->vm, vcpuid, "rdmsr 0x%08x", ecx);
+ SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpuid, vmexit, ecx);
error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
if (error) {
vmexit->exitcode = VM_EXITCODE_RDMSR;
@@ -2524,14 +2530,14 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
break;
case EXIT_REASON_WRMSR:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1);
retu = false;
eax = vmxctx->guest_rax;
ecx = vmxctx->guest_rcx;
edx = vmxctx->guest_rdx;
- VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
+ VCPU_CTR2(vmx->vm, vcpuid, "wrmsr 0x%08x value 0x%016lx",
ecx, (uint64_t)edx << 32 | eax);
- SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx,
+ SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpuid, ecx,
(uint64_t)edx << 32 | eax);
error = emulate_wrmsr(vmx, vcpu, ecx,
(uint64_t)edx << 32 | eax, &retu);
@@ -2548,8 +2554,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
break;
case EXIT_REASON_HLT:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
- SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_HLT, 1);
+ SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_HLT;
vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
if (virtual_interrupt_delivery)
@@ -2559,19 +2565,19 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vmexit->u.hlt.intr_status = 0;
break;
case EXIT_REASON_MTF:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
- SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_MTRAP, 1);
+ SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_MTRAP;
vmexit->inst_length = 0;
break;
case EXIT_REASON_PAUSE:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
- SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_PAUSE, 1);
+ SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_PAUSE;
break;
case EXIT_REASON_INTR_WINDOW:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
- SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1);
+ SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
vmx_clear_int_window_exiting(vmx, vcpu);
return (1);
case EXIT_REASON_EXT_INTR:
@@ -2586,7 +2592,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
*/
intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
SDT_PROBE4(vmm, vmx, exit, interrupt,
- vmx, vcpu, vmexit, intr_info);
+ vmx, vcpuid, vmexit, intr_info);
/*
* XXX: Ignore this exit if VMCS_INTR_VALID is not set.
@@ -2603,18 +2609,18 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* This is special. We want to treat this as an 'handled'
* VM-exit but not increment the instruction pointer.
*/
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXTINT, 1);
return (1);
case EXIT_REASON_NMI_WINDOW:
- SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
/* Exit to allow the pending virtual NMI to be injected */
- if (vm_nmi_pending(vmx->vm, vcpu))
+ if (vm_nmi_pending(vmx->vm, vcpuid))
vmx_inject_nmi(vmx, vcpu);
vmx_clear_nmi_window_exiting(vmx, vcpu);
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1);
return (1);
case EXIT_REASON_INOUT:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INOUT, 1);
vmexit->exitcode = VM_EXITCODE_INOUT;
vmexit->u.inout.bytes = (qual & 0x7) + 1;
vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
@@ -2634,15 +2640,15 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
vis->addrsize = inout_str_addrsize(inst_info);
inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
}
- SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
break;
case EXIT_REASON_CPUID:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
- SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
- handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CPUID, 1);
+ SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
+ handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx);
break;
case EXIT_REASON_EXCEPTION:
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXCEPTION, 1);
intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
KASSERT((intr_info & VMCS_INTR_VALID) != 0,
("VM exit interruption info invalid: %#x", intr_info));
@@ -2675,7 +2681,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* the machine check back into the guest.
*/
if (intr_vec == IDT_MC) {
- VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
+ VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to MCE handler");
__asm __volatile("int $18");
return (1);
}
@@ -2685,7 +2691,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* debug exceptions, bounce them out to userland.
*/
if (intr_type == VMCS_INTR_T_SWEXCEPTION && intr_vec == IDT_BP &&
- (vmx_vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) {
+ (vcpu->cap.set & (1 << VM_CAP_BPT_EXIT))) {
vmexit->exitcode = VM_EXITCODE_BPT;
vmexit->u.bpt.inst_length = vmexit->inst_length;
vmexit->inst_length = 0;
@@ -2713,11 +2719,11 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
errcode_valid = 1;
errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
}
- VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
+ VCPU_CTR2(vmx->vm, vcpuid, "Reflecting exception %d/%#x into "
"the guest", intr_vec, errcode);
SDT_PROBE5(vmm, vmx, exit, exception,
- vmx, vcpu, vmexit, intr_vec, errcode);
- error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
+ vmx, vcpuid, vmexit, intr_vec, errcode);
+ error = vm_inject_exception(vmx->vm, vcpuid, intr_vec,
errcode_valid, errcode, 0);
KASSERT(error == 0, ("%s: vm_inject_exception error %d",
__func__, error));
@@ -2730,20 +2736,20 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* this must be an instruction that accesses MMIO space.
*/
gpa = vmcs_gpa();
- if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
- apic_access_fault(vmx, vcpu, gpa)) {
+ if (vm_mem_allocated(vmx->vm, vcpuid, gpa) ||
+ apic_access_fault(vcpu, gpa)) {
vmexit->exitcode = VM_EXITCODE_PAGING;
vmexit->inst_length = 0;
vmexit->u.paging.gpa = gpa;
vmexit->u.paging.fault_type = ept_fault_type(qual);
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
SDT_PROBE5(vmm, vmx, exit, nestedfault,
- vmx, vcpu, vmexit, gpa, qual);
+ vmx, vcpuid, vmexit, gpa, qual);
} else if (ept_emulation_fault(qual)) {
vmexit_inst_emul(vmexit, gpa, vmcs_gla());
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INST_EMUL, 1);
SDT_PROBE4(vmm, vmx, exit, mmiofault,
- vmx, vcpu, vmexit, gpa);
+ vmx, vcpuid, vmexit, gpa);
}
/*
* If Virtual NMIs control is 1 and the VM-exit is due to an
@@ -2760,12 +2766,12 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
case EXIT_REASON_VIRTUALIZED_EOI:
vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
vmexit->u.ioapic_eoi.vector = qual & 0xFF;
- SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpuid, vmexit);
vmexit->inst_length = 0; /* trap-like */
break;
case EXIT_REASON_APIC_ACCESS:
- SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
- handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpuid, vmexit);
+ handled = vmx_handle_apic_access(vcpu, vmexit);
break;
case EXIT_REASON_APIC_WRITE:
/*
@@ -2773,25 +2779,25 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
* pointing to the next instruction.
*/
vmexit->inst_length = 0;
- vlapic = vm_lapic(vmx->vm, vcpu);
+ vlapic = vm_lapic(vmx->vm, vcpuid);
SDT_PROBE4(vmm, vmx, exit, apicwrite,
- vmx, vcpu, vmexit, vlapic);
- handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
+ vmx, vcpuid, vmexit, vlapic);
+ handled = vmx_handle_apic_write(vcpu, vlapic, qual);
break;
case EXIT_REASON_XSETBV:
- SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpuid, vmexit);
handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
break;
case EXIT_REASON_MONITOR:
- SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_MONITOR;
break;
case EXIT_REASON_MWAIT:
- SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_MWAIT;
break;
case EXIT_REASON_TPR:
- vlapic = vm_lapic(vmx->vm, vcpu);
+ vlapic = vm_lapic(vmx->vm, vcpuid);
vlapic_sync_tpr(vlapic);
vmexit->inst_length = 0;
handled = HANDLED;
@@ -2806,7 +2812,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMXOFF:
case EXIT_REASON_VMXON:
- SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
+ SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_VMINSN;
break;
case EXIT_REASON_INVD:
@@ -2816,8 +2822,8 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
break;
default:
SDT_PROBE4(vmm, vmx, exit, unknown,
- vmx, vcpu, vmexit, reason);
- vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
+ vmx, vcpuid, vmexit, reason);
+ vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_UNKNOWN, 1);
break;
}
@@ -2854,7 +2860,7 @@ vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
}
SDT_PROBE4(vmm, vmx, exit, return,
- vmx, vcpu, vmexit, handled);
+ vmx, vcpuid, vmexit, handled);
return (handled);
}
@@ -2892,7 +2898,8 @@ vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
* clear NMI blocking.
*/
static __inline void
-vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
+vmx_exit_handle_nmi(struct vmx *vmx, struct vmx_vcpu *vcpu,
+ struct vm_exit *vmexit)
{
uint32_t intr_info;
@@ -2908,7 +2915,7 @@ vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
"to NMI has invalid vector: %#x", intr_info));
- VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
+ VCPU_CTR0(vmx->vm, vcpu->vcpuid, "Vectoring to NMI handler");
__asm __volatile("int $2");
}
}
@@ -3006,12 +3013,12 @@ vmx_pmap_deactivate(struct vmx *vmx, pmap_t pmap)
}
static int
-vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
+vmx_run(void *arg, void *vcpui, register_t rip, pmap_t pmap,
struct vm_eventinfo *evinfo)
{
- int rc, handled, launched;
+ int rc, handled, launched, vcpuid;
struct vmx *vmx;
- struct vmx_vcpu *vmx_vcpu;
+ struct vmx_vcpu *vcpu;
struct vm *vm;
struct vmxctx *vmxctx;
struct vmcs *vmcs;
@@ -3023,11 +3030,12 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
vmx = arg;
vm = vmx->vm;
- vmx_vcpu = &vmx->vcpus[vcpu];
- vmcs = vmx_vcpu->vmcs;
- vmxctx = &vmx_vcpu->ctx;
- vlapic = vm_lapic(vm, vcpu);
- vmexit = vm_exitinfo(vm, vcpu);
+ vcpu = vcpui;
+ vcpuid = vcpu->vcpuid;
+ vmcs = vcpu->vmcs;
+ vmxctx = &vcpu->ctx;
+ vlapic = vm_lapic(vm, vcpuid);
+ vmexit = vm_exitinfo(vm, vcpuid);
launched = 0;
KASSERT(vmxctx->pmap == pmap,
@@ -3082,33 +3090,33 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
*/
if (vcpu_suspended(evinfo)) {
enable_intr();
- vm_exit_suspended(vmx->vm, vcpu, rip);
+ vm_exit_suspended(vmx->vm, vcpuid, rip);
break;
}
if (vcpu_rendezvous_pending(evinfo)) {
enable_intr();
- vm_exit_rendezvous(vmx->vm, vcpu, rip);
+ vm_exit_rendezvous(vmx->vm, vcpuid, rip);
break;
}
if (vcpu_reqidle(evinfo)) {
enable_intr();
- vm_exit_reqidle(vmx->vm, vcpu, rip);
+ vm_exit_reqidle(vmx->vm, vcpuid, rip);
break;
}
- if (vcpu_should_yield(vm, vcpu)) {
+ if (vcpu_should_yield(vm, vcpuid)) {
enable_intr();
- vm_exit_astpending(vmx->vm, vcpu, rip);
+ vm_exit_astpending(vmx->vm, vcpuid, rip);
vmx_astpending_trace(vmx, vcpu, rip);
handled = HANDLED;
break;
}
- if (vcpu_debugged(vm, vcpu)) {
+ if (vcpu_debugged(vm, vcpuid)) {
enable_intr();
- vm_exit_debug(vmx->vm, vcpu, rip);
+ vm_exit_debug(vmx->vm, vcpuid, rip);
break;
}
@@ -3117,7 +3125,7 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
* must be updated right before entering the guest.
*/
if (tpr_shadowing && !virtual_interrupt_delivery) {
- if ((vmx_vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) {
+ if ((vcpu->cap.proc_ctls & PROCBASED_USE_TPR_SHADOW) != 0) {
vmcs_write(VMCS_TPR_THRESHOLD, vlapic_get_cr8(vlapic));
}
}
@@ -3181,7 +3189,7 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
/* Update 'nextrip' */
- vmx_vcpu->state.nextrip = rip;
+ vcpu->state.nextrip = rip;
if (rc == VMX_GUEST_VMEXIT) {
vmx_exit_handle_nmi(vmx, vcpu, vmexit);
@@ -3206,7 +3214,7 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
handled, vmexit->exitcode);
}
- VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
+ VCPU_CTR1(vm, vcpuid, "returning from vmx_run: exitcode %d",
vmexit->exitcode);
VMCLEAR(vmcs);
@@ -3216,25 +3224,25 @@ vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
}
static void
+vmx_vcpu_cleanup(void *arg, void *vcpui)
+{
+ struct vmx_vcpu *vcpu = vcpui;
+
+ vpid_free(vcpu->state.vpid);
+ free(vcpu->pir_desc, M_VMX);
+ free(vcpu->apic_page, M_VMX);
+ free(vcpu->vmcs, M_VMX);
+ free(vcpu, M_VMX);
+}
+
+static void
vmx_cleanup(void *arg)
{
- int i;
- struct vmx_vcpu *vcpu;
struct vmx *vmx = arg;
- uint16_t maxcpus;
- if (apic_access_virtualization(vmx, 0))
+ if (virtual_interrupt_delivery)
vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
- maxcpus = vm_get_maxcpus(vmx->vm);
- for (i = 0; i < maxcpus; i++) {
- vcpu = &vmx->vcpus[i];
- vpid_free(vcpu->state.vpid);
- free(vcpu->pir_desc, M_VMX);
- free(vcpu->apic_page, M_VMX);
- free(vcpu->vmcs, M_VMX);
- }
-
free(vmx->msr_bitmap, M_VMX);
free(vmx, M_VMX);
@@ -3319,19 +3327,20 @@ vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
}
static int
-vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+vmx_get_intr_shadow(struct vmx_vcpu *vcpu, int running, uint64_t *retval)
{
uint64_t gi;
int error;
- error = vmcs_getreg(vmx->vcpus[vcpu].vmcs, running,
+ error = vmcs_getreg(vcpu->vmcs, running,
VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
return (error);
}
static int
-vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+vmx_modify_intr_shadow(struct vmx *vmx, struct vmx_vcpu *vcpu, int running,
+ uint64_t val)
{
struct vmcs *vmcs;
uint64_t gi;
@@ -3345,7 +3354,7 @@ vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
goto done;
}
- vmcs = vmx->vcpus[vcpu].vmcs;
+ vmcs = vcpu->vmcs;
ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
error = vmcs_getreg(vmcs, running, ident, &gi);
if (error == 0) {
@@ -3353,7 +3362,7 @@ vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
error = vmcs_setreg(vmcs, running, ident, gi);
}
done:
- VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+ VCPU_CTR2(vmx->vm, vcpu->vcpuid, "Setting intr_shadow to %#lx %s", val,
error ? "failed" : "succeeded");
return (error);
}
@@ -3380,48 +3389,51 @@ vmx_shadow_reg(int reg)
}
static int
-vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
+vmx_getreg(void *arg, void *vcpui, int reg, uint64_t *retval)
{
int running, hostcpu;
struct vmx *vmx = arg;
+ struct vmx_vcpu *vcpu = vcpui;
- running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+ running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
- panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
+ panic("vmx_getreg: %s%d is running", vm_name(vmx->vm),
+ vcpu->vcpuid);
if (reg == VM_REG_GUEST_INTR_SHADOW)
- return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+ return (vmx_get_intr_shadow(vcpu, running, retval));
- if (vmxctx_getreg(&vmx->vcpus[vcpu].ctx, reg, retval) == 0)
+ if (vmxctx_getreg(&vcpu->ctx, reg, retval) == 0)
return (0);
- return (vmcs_getreg(vmx->vcpus[vcpu].vmcs, running, reg, retval));
+ return (vmcs_getreg(vcpu->vmcs, running, reg, retval));
}
static int
-vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
+vmx_setreg(void *arg, void *vcpui, int reg, uint64_t val)
{
int error, hostcpu, running, shadow;
uint64_t ctls;
pmap_t pmap;
struct vmx *vmx = arg;
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
+ struct vmx_vcpu *vcpu = vcpui;
- running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+ running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
- panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
+ panic("vmx_setreg: %s%d is running", vm_name(vmx->vm),
+ vcpu->vcpuid);
if (reg == VM_REG_GUEST_INTR_SHADOW)
return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
- if (vmxctx_setreg(&vmx_vcpu->ctx, reg, val) == 0)
+ if (vmxctx_setreg(&vcpu->ctx, reg, val) == 0)
return (0);
/* Do not permit user write access to VMCS fields by offset. */
if (reg < 0)
return (EINVAL);
- error = vmcs_setreg(vmx_vcpu->vmcs, running, reg, val);
+ error = vmcs_setreg(vcpu->vmcs, running, reg, val);
if (error == 0) {
/*
@@ -3431,13 +3443,13 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
*/
if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
(reg == VM_REG_GUEST_EFER)) {
- vmcs_getreg(vmx_vcpu->vmcs, running,
+ vmcs_getreg(vcpu->vmcs, running,
VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
if (val & EFER_LMA)
ctls |= VM_ENTRY_GUEST_LMA;
else
ctls &= ~VM_ENTRY_GUEST_LMA;
- vmcs_setreg(vmx_vcpu->vmcs, running,
+ vmcs_setreg(vcpu->vmcs, running,
VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
}
@@ -3446,7 +3458,7 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
/*
* Store the unmodified value in the shadow
*/
- error = vmcs_setreg(vmx_vcpu->vmcs, running,
+ error = vmcs_setreg(vcpu->vmcs, running,
VMCS_IDENT(shadow), val);
}
@@ -3458,7 +3470,7 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
* XXX the processor retains global mappings when %cr3
* is updated but vmx_invvpid() does not.
*/
- pmap = vmx_vcpu->ctx.pmap;
+ pmap = vcpu->ctx.pmap;
vmx_invvpid(vmx, vcpu, pmap, running);
}
}
@@ -3467,41 +3479,45 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
}
static int
-vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmx_getdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
{
int hostcpu, running;
struct vmx *vmx = arg;
+ struct vmx_vcpu *vcpu = vcpui;
- running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+ running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
- panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
+ panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm),
+ vcpu->vcpuid);
- return (vmcs_getdesc(vmx->vcpus[vcpu].vmcs, running, reg, desc));
+ return (vmcs_getdesc(vcpu->vmcs, running, reg, desc));
}
static int
-vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
+vmx_setdesc(void *arg, void *vcpui, int reg, struct seg_desc *desc)
{
int hostcpu, running;
struct vmx *vmx = arg;
+ struct vmx_vcpu *vcpu = vcpui;
- running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+ running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu)
- panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
+ panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm),
+ vcpu->vcpuid);
- return (vmcs_setdesc(vmx->vcpus[vcpu].vmcs, running, reg, desc));
+ return (vmcs_setdesc(vcpu->vmcs, running, reg, desc));
}
static int
-vmx_getcap(void *arg, int vcpu, int type, int *retval)
+vmx_getcap(void *arg, void *vcpui, int type, int *retval)
{
- struct vmx *vmx = arg;
+ struct vmx_vcpu *vcpu = vcpui;
int vcap;
int ret;
ret = ENOENT;
- vcap = vmx->vcpus[vcpu].cap.set;
+ vcap = vcpu->cap.set;
switch (type) {
case VM_CAP_HALT_EXIT:
@@ -3547,11 +3563,11 @@ vmx_getcap(void *arg, int vcpu, int type, int *retval)
}
static int
-vmx_setcap(void *arg, int vcpu, int type, int val)
+vmx_setcap(void *arg, void *vcpui, int type, int val)
{
struct vmx *vmx = arg;
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpu];
- struct vmcs *vmcs = vmx_vcpu->vmcs;
+ struct vmx_vcpu *vcpu = vcpui;
+ struct vmcs *vmcs = vcpu->vmcs;
struct vlapic *vlapic;
uint32_t baseval;
uint32_t *pptr;
@@ -3567,7 +3583,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_HALT_EXIT:
if (cap_halt_exit) {
retval = 0;
- pptr = &vmx_vcpu->cap.proc_ctls;
+ pptr = &vcpu->cap.proc_ctls;
baseval = *pptr;
flag = PROCBASED_HLT_EXITING;
reg = VMCS_PRI_PROC_BASED_CTLS;
@@ -3576,7 +3592,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_MTRAP_EXIT:
if (cap_monitor_trap) {
retval = 0;
- pptr = &vmx_vcpu->cap.proc_ctls;
+ pptr = &vcpu->cap.proc_ctls;
baseval = *pptr;
flag = PROCBASED_MTF;
reg = VMCS_PRI_PROC_BASED_CTLS;
@@ -3585,7 +3601,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_PAUSE_EXIT:
if (cap_pause_exit) {
retval = 0;
- pptr = &vmx_vcpu->cap.proc_ctls;
+ pptr = &vcpu->cap.proc_ctls;
baseval = *pptr;
flag = PROCBASED_PAUSE_EXITING;
reg = VMCS_PRI_PROC_BASED_CTLS;
@@ -3605,7 +3621,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_UNRESTRICTED_GUEST:
if (cap_unrestricted_guest) {
retval = 0;
- pptr = &vmx_vcpu->cap.proc_ctls2;
+ pptr = &vcpu->cap.proc_ctls2;
baseval = *pptr;
flag = PROCBASED2_UNRESTRICTED_GUEST;
reg = VMCS_SEC_PROC_BASED_CTLS;
@@ -3614,7 +3630,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_ENABLE_INVPCID:
if (cap_invpcid) {
retval = 0;
- pptr = &vmx_vcpu->cap.proc_ctls2;
+ pptr = &vcpu->cap.proc_ctls2;
baseval = *pptr;
flag = PROCBASED2_ENABLE_INVPCID;
reg = VMCS_SEC_PROC_BASED_CTLS;
@@ -3624,8 +3640,8 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
retval = 0;
/* Don't change the bitmap if we are tracing all exceptions. */
- if (vmx_vcpu->cap.exc_bitmap != 0xffffffff) {
- pptr = &vmx_vcpu->cap.exc_bitmap;
+ if (vcpu->cap.exc_bitmap != 0xffffffff) {
+ pptr = &vcpu->cap.exc_bitmap;
baseval = *pptr;
flag = (1 << IDT_BP);
reg = VMCS_EXCEPTION_BITMAP;
@@ -3634,7 +3650,7 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
case VM_CAP_IPI_EXIT:
retval = 0;
- vlapic = vm_lapic(vmx->vm, vcpu);
+ vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
vlapic->ipi_exit = val;
break;
default:
@@ -3665,9 +3681,9 @@ vmx_setcap(void *arg, int vcpu, int type, int val)
}
if (val) {
- vmx_vcpu->cap.set |= (1 << type);
+ vcpu->cap.set |= (1 << type);
} else {
- vmx_vcpu->cap.set &= ~(1 << type);
+ vcpu->cap.set &= ~(1 << type);
}
return (0);
@@ -3689,6 +3705,7 @@ struct vlapic_vtx {
struct vlapic vlapic;
struct pir_desc *pir_desc;
struct vmx *vmx;
+ struct vmx_vcpu *vcpu;
u_int pending_prio;
};
@@ -3862,7 +3879,6 @@ static void
vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
{
struct vlapic_vtx *vlapic_vtx;
- struct vmx *vmx;
struct vmcs *vmcs;
uint64_t mask, val;
@@ -3871,8 +3887,7 @@ vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
("vmx_set_tmr: vcpu cannot be running"));
vlapic_vtx = (struct vlapic_vtx *)vlapic;
- vmx = vlapic_vtx->vmx;
- vmcs = vmx->vcpus[vlapic->vcpuid].vmcs;
+ vmcs = vlapic_vtx->vcpu->vmcs;
mask = 1UL << (vector % 64);
VMPTRLD(vmcs);
@@ -3888,15 +3903,13 @@ vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
static void
vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
{
- struct vmx *vmx;
+ struct vlapic_vtx *vlapic_vtx;
struct vmx_vcpu *vcpu;
struct vmcs *vmcs;
uint32_t proc_ctls;
- int vcpuid;
- vcpuid = vlapic->vcpuid;
- vmx = ((struct vlapic_vtx *)vlapic)->vmx;
- vcpu = &vmx->vcpus[vcpuid];
+ vlapic_vtx = (struct vlapic_vtx *)vlapic;
+ vcpu = vlapic_vtx->vcpu;
vmcs = vcpu->vmcs;
proc_ctls = vcpu->cap.proc_ctls;
@@ -3913,15 +3926,16 @@ vmx_enable_x2apic_mode_ts(struct vlapic *vlapic)
static void
vmx_enable_x2apic_mode_vid(struct vlapic *vlapic)
{
+ struct vlapic_vtx *vlapic_vtx;
struct vmx *vmx;
struct vmx_vcpu *vcpu;
struct vmcs *vmcs;
uint32_t proc_ctls2;
- int vcpuid, error __diagused;
+ int error __diagused;
- vcpuid = vlapic->vcpuid;
- vmx = ((struct vlapic_vtx *)vlapic)->vmx;
- vcpu = &vmx->vcpus[vcpuid];
+ vlapic_vtx = (struct vlapic_vtx *)vlapic;
+ vmx = vlapic_vtx->vmx;
+ vcpu = vlapic_vtx->vcpu;
vmcs = vcpu->vmcs;
proc_ctls2 = vcpu->cap.proc_ctls2;
@@ -4057,22 +4071,25 @@ vmx_inject_pir(struct vlapic *vlapic)
}
static struct vlapic *
-vmx_vlapic_init(void *arg, int vcpuid)
+vmx_vlapic_init(void *arg, void *vcpui)
{
struct vmx *vmx;
+ struct vmx_vcpu *vcpu;
struct vlapic *vlapic;
struct vlapic_vtx *vlapic_vtx;
vmx = arg;
+ vcpu = vcpui;
vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
vlapic->vm = vmx->vm;
- vlapic->vcpuid = vcpuid;
- vlapic->apic_page = (struct LAPIC *)vmx->vcpus[vcpuid].apic_page;
+ vlapic->vcpuid = vcpu->vcpuid;
+ vlapic->apic_page = (struct LAPIC *)vcpu->apic_page;
vlapic_vtx = (struct vlapic_vtx *)vlapic;
- vlapic_vtx->pir_desc = vmx->vcpus[vcpuid].pir_desc;
+ vlapic_vtx->pir_desc = vcpu->pir_desc;
vlapic_vtx->vmx = vmx;
+ vlapic_vtx->vcpu = vcpu;
if (tpr_shadowing) {
vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode_ts;
@@ -4110,7 +4127,7 @@ vmx_snapshot(void *arg, struct vm_snapshot_meta *meta)
}
static int
-vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpuid)
+vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, void *vcpui)
{
struct vmcs *vmcs;
struct vmx *vmx;
@@ -4119,16 +4136,16 @@ vmx_vcpu_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpuid)
int err, run, hostcpu;
vmx = (struct vmx *)arg;
+ vcpu = vcpui;
err = 0;
KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
- vcpu = &vmx->vcpus[vcpuid];
vmcs = vcpu->vmcs;
- run = vcpu_is_running(vmx->vm, vcpuid, &hostcpu);
+ run = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (run && hostcpu != curcpu) {
printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
- vcpuid);
+ vcpu->vcpuid);
return (EINVAL);
}
@@ -4218,18 +4235,20 @@ done:
}
static int
-vmx_restore_tsc(void *arg, int vcpu, uint64_t offset)
+vmx_restore_tsc(void *arg, void *vcpui, uint64_t offset)
{
struct vmcs *vmcs;
struct vmx *vmx = (struct vmx *)arg;
+ struct vmx_vcpu *vcpu = vcpui;
int error, running, hostcpu;
KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
- vmcs = vmx->vcpus[vcpu].vmcs;
+ vmcs = vcpu->vmcs;
- running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
+ running = vcpu_is_running(vmx->vm, vcpu->vcpuid, &hostcpu);
if (running && hostcpu != curcpu) {
- printf("%s: %s%d is running", __func__, vm_name(vmx->vm), vcpu);
+ printf("%s: %s%d is running", __func__, vm_name(vmx->vm),
+ vcpu->vcpuid);
return (EINVAL);
}
@@ -4251,6 +4270,8 @@ const struct vmm_ops vmm_ops_intel = {
.init = vmx_init,
.run = vmx_run,
.cleanup = vmx_cleanup,
+ .vcpu_init = vmx_vcpu_init,
+ .vcpu_cleanup = vmx_vcpu_cleanup,
.getreg = vmx_getreg,
.setreg = vmx_setreg,
.getdesc = vmx_getdesc,
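
The vmm_ops_intel table above now registers the two new per-vCPU methods, and vmx_vcpu_cleanup() earlier in this file shows the teardown half of the contract: the backend gets back the opaque cookie it returned from its init method and frees whatever it allocated there. What follows is a minimal sketch of that contract for a hypothetical backend; the names (nullvmi, M_NULLVMI) are illustrative only and are not part of this change.

/*
 * Illustrative only: a hypothetical backend showing the vcpu_init/
 * vcpu_cleanup cookie contract.  Not code from this commit.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_NULLVMI, "nullvmi", "illustrative per-vCPU data");

struct nullvmi_vcpu {
	int	vcpuid;		/* kept for traces and generic-layer callbacks */
	/* CPU-specific per-vCPU state would follow. */
};

static void *
nullvmi_vcpu_init(void *vmi, int vcpu_id)
{
	struct nullvmi_vcpu *vcpu;

	/*
	 * Allocate per-vCPU state; the generic layer stores the return
	 * value in 'struct vcpu' and passes it back to later callbacks.
	 */
	vcpu = malloc(sizeof(*vcpu), M_NULLVMI, M_WAITOK | M_ZERO);
	vcpu->vcpuid = vcpu_id;
	return (vcpu);
}

static void
nullvmi_vcpu_cleanup(void *vmi, void *vcpui)
{
	struct nullvmi_vcpu *vcpu = vcpui;

	/* Free exactly what init allocated; the cookie is dead afterwards. */
	free(vcpu, M_NULLVMI);
}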
diff --git a/sys/amd64/vmm/intel/vmx.h b/sys/amd64/vmm/intel/vmx.h
index 39594473cd43..32410dc1810e 100644
--- a/sys/amd64/vmm/intel/vmx.h
+++ b/sys/amd64/vmm/intel/vmx.h
@@ -31,6 +31,9 @@
#ifndef _VMX_H_
#define _VMX_H_
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
#include "vmcs.h"
#include "x86.h"
@@ -131,15 +134,17 @@ struct vmx_vcpu {
struct vmxcap cap;
struct vmxstate state;
struct vm_mtrr mtrr;
+ int vcpuid;
};
/* virtual machine softc */
struct vmx {
- struct vmx_vcpu vcpus[VM_MAXCPU];
+ struct vm *vm;
char *msr_bitmap;
uint64_t eptp;
- struct vm *vm;
long eptgen[MAXCPU]; /* cached pmap->pm_eptgen */
+ pmap_t pmap;
+ uint16_t vpids[VM_MAXCPU];
};
extern bool vmx_have_msr_tsc_aux;
@@ -153,7 +158,8 @@ void vmx_call_isr(uintptr_t entry);
u_long vmx_fix_cr0(u_long cr0);
u_long vmx_fix_cr4(u_long cr4);
-int vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset);
+int vmx_set_tsc_offset(struct vmx *vmx, struct vmx_vcpu *vcpu,
+ uint64_t offset);
extern char vmx_exit_guest[];
extern char vmx_exit_guest_flush_rsb[];
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
index 40f0057f2cdd..8fba9be7d57e 100644
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -314,15 +314,13 @@ vmx_msr_init(void)
}
void
-vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
-
/*
* The permissions bitmap is shared between all vcpus so initialize it
* once when initializing the vBSP.
*/
- if (vcpuid == 0) {
+ if (vcpu->vcpuid == 0) {
guest_msr_rw(vmx, MSR_LSTAR);
guest_msr_rw(vmx, MSR_CSTAR);
guest_msr_rw(vmx, MSR_STAR);
@@ -333,7 +331,7 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
/*
* Initialize guest IA32_PAT MSR with default value after reset.
*/
- vmx_vcpu->guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
+ vcpu->guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
PAT_VALUE(1, PAT_WRITE_THROUGH) |
PAT_VALUE(2, PAT_UNCACHED) |
PAT_VALUE(3, PAT_UNCACHEABLE) |
@@ -346,24 +344,22 @@ vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
}
void
-vmx_msr_guest_enter(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
/* Save host MSRs (in particular, KGSBASE) and restore guest MSRs */
update_pcb_bases(curpcb);
- wrmsr(MSR_LSTAR, vmx_vcpu->guest_msrs[IDX_MSR_LSTAR]);
- wrmsr(MSR_CSTAR, vmx_vcpu->guest_msrs[IDX_MSR_CSTAR]);
- wrmsr(MSR_STAR, vmx_vcpu->guest_msrs[IDX_MSR_STAR]);
- wrmsr(MSR_SF_MASK, vmx_vcpu->guest_msrs[IDX_MSR_SF_MASK]);
- wrmsr(MSR_KGSBASE, vmx_vcpu->guest_msrs[IDX_MSR_KGSBASE]);
+ wrmsr(MSR_LSTAR, vcpu->guest_msrs[IDX_MSR_LSTAR]);
+ wrmsr(MSR_CSTAR, vcpu->guest_msrs[IDX_MSR_CSTAR]);
+ wrmsr(MSR_STAR, vcpu->guest_msrs[IDX_MSR_STAR]);
+ wrmsr(MSR_SF_MASK, vcpu->guest_msrs[IDX_MSR_SF_MASK]);
+ wrmsr(MSR_KGSBASE, vcpu->guest_msrs[IDX_MSR_KGSBASE]);
}
void
-vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
- uint64_t guest_tsc_aux = vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX];
+ uint64_t guest_tsc_aux = vcpu->guest_msrs[IDX_MSR_TSC_AUX];
uint32_t host_aux = cpu_auxmsr();
if (vmx_have_msr_tsc_aux && guest_tsc_aux != host_aux)
@@ -371,16 +367,15 @@ vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid)
}
void
-vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
/* Save guest MSRs */
- vmx_vcpu->guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
- vmx_vcpu->guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
- vmx_vcpu->guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
- vmx_vcpu->guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
- vmx_vcpu->guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
+ vcpu->guest_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
+ vcpu->guest_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
+ vcpu->guest_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
+ vcpu->guest_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
+ vcpu->guest_msrs[IDX_MSR_KGSBASE] = rdmsr(MSR_KGSBASE);
/* Restore host MSRs */
wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
@@ -392,16 +387,15 @@ vmx_msr_guest_exit(struct vmx *vmx, int vcpuid)
}
void
-vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid)
+vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
- uint64_t guest_tsc_aux = vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX];
+ uint64_t guest_tsc_aux = vcpu->guest_msrs[IDX_MSR_TSC_AUX];
uint32_t host_aux = cpu_auxmsr();
if (vmx_have_msr_tsc_aux && guest_tsc_aux != host_aux)
/*
* Note that it is not necessary to save the guest value
- * here; vmx->guest_msrs[vcpuid][IDX_MSR_TSC_AUX] always
+ * here; vcpu->guest_msrs[IDX_MSR_TSC_AUX] always
* contains the current value since it is updated whenever
* the guest writes to it (which is expected to be very
* rare).
@@ -410,9 +404,9 @@ vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid)
}
int
-vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
+vmx_rdmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
+ bool *retu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
int error;
error = 0;
@@ -428,8 +422,8 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
- if (vm_rdmtrr(&vmx_vcpu->mtrr, num, val) != 0) {
- vm_inject_gp(vmx->vm, vcpuid);
+ if (vm_rdmtrr(&vcpu->mtrr, num, val) != 0) {
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
}
break;
case MSR_IA32_MISC_ENABLE:
@@ -443,7 +437,7 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
*val = turbo_ratio_limit;
break;
case MSR_PAT:
- *val = vmx_vcpu->guest_msrs[IDX_MSR_PAT];
+ *val = vcpu->guest_msrs[IDX_MSR_PAT];
break;
default:
error = EINVAL;
@@ -453,9 +447,9 @@ vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
}
int
-vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
+vmx_wrmsr(struct vmx *vmx, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
+ bool *retu)
{
- struct vmx_vcpu *vmx_vcpu = &vmx->vcpus[vcpuid];
uint64_t changed;
int error;
@@ -471,8 +465,8 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
- if (vm_wrmtrr(&vmx_vcpu->mtrr, num, val) != 0) {
- vm_inject_gp(vmx->vm, vcpuid);
+ if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
}
break;
case MSR_IA32_MISC_ENABLE:
@@ -497,12 +491,12 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
break;
case MSR_PAT:
if (pat_valid(val))
- vmx_vcpu->guest_msrs[IDX_MSR_PAT] = val;
+ vcpu->guest_msrs[IDX_MSR_PAT] = val;
else
- vm_inject_gp(vmx->vm, vcpuid);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
break;
case MSR_TSC:
- error = vmx_set_tsc_offset(vmx, vcpuid, val - rdtsc());
+ error = vmx_set_tsc_offset(vmx, vcpu, val - rdtsc());
break;
case MSR_TSC_AUX:
if (vmx_have_msr_tsc_aux)
@@ -511,9 +505,9 @@ vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
* value when it is called immediately before guest
* entry.
*/
- vmx_vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
+ vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
else
- vm_inject_gp(vmx->vm, vcpuid);
+ vm_inject_gp(vmx->vm, vcpu->vcpuid);
break;
default:
error = EINVAL;
diff --git a/sys/amd64/vmm/intel/vmx_msr.h b/sys/amd64/vmm/intel/vmx_msr.h
index e3a570545e57..7aca70c49dba 100644
--- a/sys/amd64/vmm/intel/vmx_msr.h
+++ b/sys/amd64/vmm/intel/vmx_msr.h
@@ -34,13 +34,15 @@
struct vmx;
void vmx_msr_init(void);
-void vmx_msr_guest_init(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_enter(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_exit(struct vmx *vmx, int vcpuid);
-void vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, int vcpuid);
-int vmx_rdmsr(struct vmx *, int vcpuid, u_int num, uint64_t *val, bool *retu);
-int vmx_wrmsr(struct vmx *, int vcpuid, u_int num, uint64_t val, bool *retu);
+void vmx_msr_guest_init(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_enter_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_enter(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_exit(struct vmx *vmx, struct vmx_vcpu *vcpu);
+void vmx_msr_guest_exit_tsc_aux(struct vmx *vmx, struct vmx_vcpu *vcpu);
+int vmx_rdmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t *val,
+ bool *retu);
+int vmx_wrmsr(struct vmx *, struct vmx_vcpu *vcpu, u_int num, uint64_t val,
+ bool *retu);
uint32_t vmx_revision(void);
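
With the prototypes above, the MSR helpers reach the guest MSR save area through the per-vCPU structure instead of indexing a static array by vcpuid. A small sketch of the resulting call-site shape follows; the real callers live in vmx_run() and the MSR exit handlers and are not reproduced here, so the wrapper function below is purely illustrative.

/*
 * Call-site shape implied by the new prototypes (illustrative wrapper).
 * 'vcpu' is the same struct vmx_vcpu * that vmx_run() receives as its
 * cookie argument.
 */
static void
msr_roundtrip_sketch(struct vmx *vmx, struct vmx_vcpu *vcpu)
{
	vmx_msr_guest_enter(vmx, vcpu);	/* load guest LSTAR/CSTAR/STAR/... */
	/* ... guest executes ... */
	vmx_msr_guest_exit(vmx, vcpu);	/* save guest values, restore host MSRs */
}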
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 0b1df029274e..2a5910f1853a 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -106,6 +106,7 @@ struct vcpu {
enum vcpu_state state; /* (o) vcpu state */
int hostcpu; /* (o) vcpu's host cpu */
int reqidle; /* (i) request vcpu to idle */
+ void *cookie; /* (i) cpu-specific data */
struct vlapic *vlapic; /* (i) APIC device model */
enum x2apic_state x2apic_state; /* (i) APIC mode */
uint64_t exitintinfo; /* (i) events pending at VM exit */
@@ -208,30 +209,32 @@ DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, int vcpu, register_t rip,
+DEFINE_VMMOPS_IFUNC(int, run, (void *vmi, void *vcpui, register_t rip,
struct pmap *pmap, struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, int vcpu_id))
+DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vmi, void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, getreg, (void *vmi, void *vcpui, int num,
uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, setreg, (void *vmi, void *vcpui, int num,
uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vmi, void *vcpui, int num,
struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, int vcpu, int num,
+DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vmi, void *vcpui, int num,
struct seg_desc *desc))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, int vcpu, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, int vcpu, int num, int val))
+DEFINE_VMMOPS_IFUNC(int, getcap, (void *vmi, void *vcpui, int num, int *retval))
+DEFINE_VMMOPS_IFUNC(int, setcap, (void *vmi, void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, int vcpu))
+DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vmi, void *vcpui))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (void *vmi, struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta
*meta))
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vmi, struct vm_snapshot_meta
- *meta, int vcpu))
-DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, int vcpuid, uint64_t now))
+ *meta, void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vmi, void *vcpui, uint64_t now))
#endif
#define fpu_start_emulating() load_cr0(rcr0() | CR0_TS)
@@ -293,12 +296,20 @@ vcpu_state2str(enum vcpu_state state)
}
#endif
+static __inline void *
+vcpu_cookie(struct vm *vm, int i)
+{
+ return (vm->vcpu[i].cookie);
+}
+
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
struct vcpu *vcpu = &vm->vcpu[i];
vmmops_vlapic_cleanup(vm->cookie, vcpu->vlapic);
+ vmmops_vcpu_cleanup(vm->cookie, vcpu->cookie);
+ vcpu->cookie = NULL;
if (destroy) {
vmm_stat_free(vcpu->stats);
fpu_save_area_free(vcpu->guestfpu);
@@ -326,7 +337,8 @@ vcpu_init(struct vm *vm, int vcpu_id, bool create)
vcpu->tsc_offset = 0;
}
- vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu_id);
+ vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu_id);
+ vcpu->vlapic = vmmops_vlapic_init(vm->cookie, vcpu->cookie);
vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
vcpu->reqidle = 0;
vcpu->exitintinfo = 0;
@@ -1070,7 +1082,8 @@ vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
if (reg >= VM_REG_LAST)
return (EINVAL);
- return (vmmops_getreg(vm->cookie, vcpu, reg, retval));
+ return (vmmops_getreg(vm->cookie, vcpu_cookie(vm, vcpu), reg,
+ retval));
}
int
@@ -1085,13 +1098,13 @@ vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
if (reg >= VM_REG_LAST)
return (EINVAL);
- error = vmmops_setreg(vm->cookie, vcpuid, reg, val);
+ vcpu = &vm->vcpu[vcpuid];
+ error = vmmops_setreg(vm->cookie, vcpu->cookie, reg, val);
if (error || reg != VM_REG_GUEST_RIP)
return (error);
/* Set 'nextrip' to match the value of %rip */
VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
- vcpu = &vm->vcpu[vcpuid];
vcpu->nextrip = val;
return (0);
}
@@ -1139,7 +1152,7 @@ vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
- return (vmmops_getdesc(vm->cookie, vcpu, reg, desc));
+ return (vmmops_getdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
}
int
@@ -1152,7 +1165,7 @@ vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
- return (vmmops_setdesc(vm->cookie, vcpu, reg, desc));
+ return (vmmops_setdesc(vm->cookie, vcpu_cookie(vm, vcpu), reg, desc));
}
static void
@@ -1770,7 +1783,8 @@ restart:
restore_guest_fpustate(vcpu);
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
- error = vmmops_run(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
+ error = vmmops_run(vm->cookie, vcpu->cookie, vcpu->nextrip, pmap,
+ &evinfo);
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
save_guest_fpustate(vcpu);
@@ -2276,7 +2290,7 @@ vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
- return (vmmops_getcap(vm->cookie, vcpu, type, retval));
+ return (vmmops_getcap(vm->cookie, vcpu_cookie(vm, vcpu), type, retval));
}
int
@@ -2288,7 +2302,7 @@ vm_set_capability(struct vm *vm, int vcpu, int type, int val)
if (type < 0 || type >= VM_CAP_MAX)
return (EINVAL);
- return (vmmops_setcap(vm->cookie, vcpu, type, val));
+ return (vmmops_setcap(vm->cookie, vcpu_cookie(vm, vcpu), type, val));
}
struct vlapic *
@@ -2849,16 +2863,19 @@ done:
}
static int
-vm_snapshot_vmcx(struct vm *vm, struct vm_snapshot_meta *meta)
+vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
{
int error;
+ struct vcpu *vcpu;
uint16_t i, maxcpus;
error = 0;
maxcpus = vm_get_maxcpus(vm);
for (i = 0; i < maxcpus; i++) {
- error = vmmops_vcpu_snapshot(vm->cookie, meta, i);
+ vcpu = &vm->vcpu[i];
+
+ error = vmmops_vcpu_snapshot(vm->cookie, meta, vcpu->cookie);
if (error != 0) {
printf("%s: failed to snapshot vmcs/vmcb data for "
"vCPU: %d; error: %d\n", __func__, i, error);
@@ -2883,7 +2900,7 @@ vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
ret = vmmops_snapshot(vm->cookie, meta);
break;
case STRUCT_VMCX:
- ret = vm_snapshot_vmcx(vm, meta);
+ ret = vm_snapshot_vcpu(vm, meta);
break;
case STRUCT_VM:
ret = vm_snapshot_vm(vm, meta);
@@ -2949,8 +2966,8 @@ vm_restore_time(struct vm *vm)
for (i = 0; i < maxcpus; i++) {
vcpu = &vm->vcpu[i];
- error = vmmops_restore_tsc(vm->cookie, i, vcpu->tsc_offset -
- now);
+ error = vmmops_restore_tsc(vm->cookie, vcpu->cookie,
+ vcpu->tsc_offset - now);
if (error)
return (error);
}
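
In the generic layer above, the cookie is created in vcpu_init() before the vlapic (which now receives the cookie) and cleared in vcpu_cleanup() after the vlapic is torn down; other entry points resolve it through vcpu_cookie(). A short sketch of that lookup pattern, using a hypothetical wrapper for illustration only:

/*
 * Illustrative only: how a vmm.c entry point threads the per-vCPU cookie
 * to a backend op.  vcpu_cookie() and vmmops_getcap() are as defined in
 * the diff above; the wrapper itself is hypothetical.
 */
static int
example_getcap(struct vm *vm, int vcpuid, int type, int *retval)
{
	void *cookie;

	cookie = vcpu_cookie(vm, vcpuid);	/* backend's per-vCPU data */
	return (vmmops_getcap(vm->cookie, cookie, type, retval));
}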