Diffstat (limited to 'sys/amd64/vmm')
-rw-r--r--  sys/amd64/vmm/intel/vmx.c        |   2
-rw-r--r--  sys/amd64/vmm/io/ppt.c           |   7
-rw-r--r--  sys/amd64/vmm/io/ppt.h           |   6
-rw-r--r--  sys/amd64/vmm/io/vlapic.c        |   2
-rw-r--r--  sys/amd64/vmm/vmm.c              | 385
-rw-r--r--  sys/amd64/vmm/vmm_dev_machdep.c  | 256
-rw-r--r--  sys/amd64/vmm/vmm_lapic.c        |   2
-rw-r--r--  sys/amd64/vmm/vmm_mem.h          |   5
-rw-r--r--  sys/amd64/vmm/vmm_mem_machdep.c  |  61
9 files changed, 404 insertions, 322 deletions
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 842281ab862e..4189c1214b40 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -27,7 +27,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"
#include <sys/param.h>
@@ -58,6 +57,7 @@
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
+#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
diff --git a/sys/amd64/vmm/io/ppt.c b/sys/amd64/vmm/io/ppt.c
index 2cb459fb848f..6feac5dcbbed 100644
--- a/sys/amd64/vmm/io/ppt.c
+++ b/sys/amd64/vmm/io/ppt.c
@@ -336,13 +336,6 @@ ppt_teardown_msix(struct pptdev *ppt)
}
int
-ppt_avail_devices(void)
-{
-
- return (num_pptdevs);
-}
-
-int
ppt_assigned_devices(struct vm *vm)
{
struct pptdev *ppt;
diff --git a/sys/amd64/vmm/io/ppt.h b/sys/amd64/vmm/io/ppt.h
index f97c399564d7..9377f34d50e6 100644
--- a/sys/amd64/vmm/io/ppt.h
+++ b/sys/amd64/vmm/io/ppt.h
@@ -43,12 +43,6 @@ int ppt_assigned_devices(struct vm *vm);
bool ppt_is_mmio(struct vm *vm, vm_paddr_t gpa);
/*
- * Returns the number of devices sequestered by the ppt driver for assignment
- * to virtual machines.
- */
-int ppt_avail_devices(void);
-
-/*
* The following functions should never be called directly.
* Use 'vm_assign_pptdev()' and 'vm_unassign_pptdev()' instead.
*/
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
index 9879dfa164a4..afd5045de574 100644
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -456,7 +456,7 @@ vlapic_fire_lvt(struct vlapic *vlapic, u_int lvt)
return (0);
}
if (vlapic_set_intr_ready(vlapic, vec, false))
- vcpu_notify_event(vlapic->vcpu, true);
+ vcpu_notify_lapic(vlapic->vcpu);
break;
case APIC_LVT_DM_NMI:
vm_inject_nmi(vlapic->vcpu);
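
Note: the vlapic.c hunk above swaps the boolean-flavored vcpu_notify_event(vcpu, true) for the dedicated vcpu_notify_lapic(), introduced in the vmm.c diff below, keeping the posted-interrupt fast path out of the generic notification routine. A minimal sketch of the call-site pattern, where example_inject_vector() is a hypothetical helper:

/*
 * Queue the vector in the virtual APIC, then use the LAPIC-specific
 * kick; for a vCPU running on another host CPU this can post the
 * interrupt directly instead of sending a plain IPI.
 */
static int
example_inject_vector(struct vcpu *vcpu, int vec)
{
	struct vlapic *vlapic = vm_lapic(vcpu);

	if (vlapic_set_intr_ready(vlapic, vec, false))
		vcpu_notify_lapic(vcpu);  /* was vcpu_notify_event(vcpu, true) */
	return (0);
}
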
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index c42da02d0bf6..2890e990633d 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -31,7 +31,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
-#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
@@ -163,7 +162,6 @@ struct vm {
void *rendezvous_arg; /* (x) [r] rendezvous func/arg */
vm_rendezvous_func_t rendezvous_func;
struct mtx rendezvous_mtx; /* (o) rendezvous lock */
- struct vmspace *vmspace; /* (o) guest's address space */
struct vm_mem mem; /* (i) [m+v] guest memory */
char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
struct vcpu **vcpu; /* (o) guest vcpus */
@@ -190,8 +188,6 @@ struct vm {
#define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
-static int vmm_initialized;
-
static void vmmops_panic(void);
static void
@@ -201,7 +197,7 @@ vmmops_panic(void)
}
#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \
- DEFINE_IFUNC(static, ret_type, vmmops_##opname, args) \
+ DEFINE_IFUNC(, ret_type, vmmops_##opname, args) \
{ \
if (vmm_is_intel()) \
return (vmm_ops_intel.opname); \
@@ -271,11 +267,7 @@ static int trap_wbinvd;
SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
"WBINVD triggers a VM-exit");
-u_int vm_maxcpu;
-SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
- &vm_maxcpu, 0, "Maximum number of vCPUs");
-
-static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
+static void vcpu_notify_event_locked(struct vcpu *vcpu);
/* global statistics */
VMM_STAT(VCPU_MIGRATIONS, "vcpu migration across host cpus");
@@ -300,14 +292,6 @@ VMM_STAT(VMEXIT_USERSPACE, "number of vm exits handled in userspace");
VMM_STAT(VMEXIT_RENDEZVOUS, "number of times rendezvous pending at exit");
VMM_STAT(VMEXIT_EXCEPTION, "number of vm exits due to exceptions");
-/*
- * Upper limit on vm_maxcpu. Limited by use of uint16_t types for CPU
- * counts as well as range of vpid values for VT-x and by the capacity
- * of cpuset_t masks. The call to new_unrhdr() in vpid_init() in
- * vmx.c requires 'vm_maxcpu + 1 <= 0xffff', hence the '- 1' below.
- */
-#define VM_MAXCPU MIN(0xffff - 1, CPU_SETSIZE)
-
#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
@@ -403,22 +387,12 @@ vm_exitinfo_cpuset(struct vcpu *vcpu)
return (&vcpu->exitinfo_cpuset);
}
-static int
-vmm_init(void)
+int
+vmm_modinit(void)
{
if (!vmm_is_hw_supported())
return (ENXIO);
- vm_maxcpu = mp_ncpus;
- TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
-
- if (vm_maxcpu > VM_MAXCPU) {
- printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
- vm_maxcpu = VM_MAXCPU;
- }
- if (vm_maxcpu == 0)
- vm_maxcpu = 1;
-
vmm_host_state_init();
vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
@@ -432,74 +406,21 @@ vmm_init(void)
return (vmmops_modinit(vmm_ipinum));
}
-static int
-vmm_handler(module_t mod, int what, void *arg)
+int
+vmm_modcleanup(void)
{
- int error;
-
- switch (what) {
- case MOD_LOAD:
- if (vmm_is_hw_supported()) {
- error = vmmdev_init();
- if (error != 0)
- break;
- error = vmm_init();
- if (error == 0)
- vmm_initialized = 1;
- else
- (void)vmmdev_cleanup();
- } else {
- error = ENXIO;
- }
- break;
- case MOD_UNLOAD:
- if (vmm_is_hw_supported()) {
- error = vmmdev_cleanup();
- if (error == 0) {
- vmm_suspend_p = NULL;
- vmm_resume_p = NULL;
- iommu_cleanup();
- if (vmm_ipinum != IPI_AST)
- lapic_ipi_free(vmm_ipinum);
- error = vmmops_modcleanup();
- /*
- * Something bad happened - prevent new
- * VMs from being created
- */
- if (error)
- vmm_initialized = 0;
- }
- } else {
- error = 0;
- }
- break;
- default:
- error = 0;
- break;
- }
- return (error);
+ vmm_suspend_p = NULL;
+ vmm_resume_p = NULL;
+ iommu_cleanup();
+ if (vmm_ipinum != IPI_AST)
+ lapic_ipi_free(vmm_ipinum);
+ return (vmmops_modcleanup());
}
-static moduledata_t vmm_kmod = {
- "vmm",
- vmm_handler,
- NULL
-};
-
-/*
- * vmm initialization has the following dependencies:
- *
- * - VT-x initialization requires smp_rendezvous() and therefore must happen
- * after SMP is fully functional (after SI_SUB_SMP).
- * - vmm device initialization requires an initialized devfs.
- */
-DECLARE_MODULE(vmm, vmm_kmod, MAX(SI_SUB_SMP, SI_SUB_DEVFS) + 1, SI_ORDER_ANY);
-MODULE_VERSION(vmm, 1);
-
static void
vm_init(struct vm *vm, bool create)
{
- vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+ vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
vm->iommu = NULL;
vm->vioapic = vioapic_init(vm);
vm->vhpet = vhpet_init(vm);
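
Note: the MOD_LOAD/MOD_UNLOAD plumbing deleted here does not disappear; vmm_modinit() and vmm_modcleanup() become the machine-dependent hooks, and the moduledata_t/DECLARE_MODULE boilerplate is expected to move to the generic dev/vmm code. A hedged sketch of what such a generic handler could look like (simplified; the vmmdev_init()/vmmdev_cleanup() and vmm_initialized bookkeeping from the deleted lines would accompany it):

static int
vmm_handler(module_t mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		return (vmm_modinit());
	case MOD_UNLOAD:
		return (vmm_modcleanup());
	default:
		return (0);
	}
}
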
@@ -563,9 +484,9 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
}
void
-vm_slock_vcpus(struct vm *vm)
+vm_lock_vcpus(struct vm *vm)
{
- sx_slock(&vm->vcpus_init_lock);
+ sx_xlock(&vm->vcpus_init_lock);
}
void
@@ -574,45 +495,27 @@ vm_unlock_vcpus(struct vm *vm)
sx_unlock(&vm->vcpus_init_lock);
}
-/*
- * The default CPU topology is a single thread per package.
- */
-u_int cores_per_package = 1;
-u_int threads_per_core = 1;
-
int
vm_create(const char *name, struct vm **retvm)
{
struct vm *vm;
- struct vmspace *vmspace;
-
- /*
- * If vmm.ko could not be successfully initialized then don't attempt
- * to create the virtual machine.
- */
- if (!vmm_initialized)
- return (ENXIO);
-
- if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
- VM_MAX_NAMELEN + 1)
- return (EINVAL);
-
- vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48);
- if (vmspace == NULL)
- return (ENOMEM);
+ int error;
vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
+ error = vm_mem_init(&vm->mem, 0, VM_MAXUSER_ADDRESS_LA48);
+ if (error != 0) {
+ free(vm, M_VM);
+ return (error);
+ }
strcpy(vm->name, name);
- vm->vmspace = vmspace;
- vm_mem_init(&vm->mem);
mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
sx_init(&vm->vcpus_init_lock, "vm vcpus");
vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK |
M_ZERO);
vm->sockets = 1;
- vm->cores = cores_per_package; /* XXX backwards compatibility */
- vm->threads = threads_per_core; /* XXX backwards compatibility */
+ vm->cores = 1; /* XXX backwards compatibility */
+ vm->threads = 1; /* XXX backwards compatibility */
vm->maxcpus = vm_maxcpu;
vm_init(vm, true);
@@ -685,9 +588,6 @@ vm_cleanup(struct vm *vm, bool destroy)
if (destroy) {
vm_mem_destroy(vm);
- vmmops_vmspace_free(vm->vmspace);
- vm->vmspace = NULL;
-
free(vm->vcpu, M_VM);
sx_destroy(&vm->vcpus_init_lock);
mtx_destroy(&vm->rendezvous_mtx);
@@ -729,31 +629,28 @@ vm_name(struct vm *vm)
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
- vm_object_t obj;
-
- if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
- return (ENOMEM);
- else
- return (0);
+ return (vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa));
}
int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
- vmm_mmio_free(vm->vmspace, gpa, len);
+ vmm_mmio_free(vm_vmspace(vm), gpa, len);
return (0);
}
static int
vm_iommu_map(struct vm *vm)
{
+ pmap_t pmap;
vm_paddr_t gpa, hpa;
struct vm_mem_map *mm;
int error, i;
sx_assert(&vm->mem.mem_segs_lock, SX_LOCKED);
+ pmap = vmspace_pmap(vm_vmspace(vm));
for (i = 0; i < VM_MAX_MEMMAPS; i++) {
if (!vm_memseg_sysmem(vm, i))
continue;
@@ -767,7 +664,7 @@ vm_iommu_map(struct vm *vm)
mm->flags |= VM_MEMMAP_F_IOMMU;
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
- hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
+ hpa = pmap_extract(pmap, gpa);
/*
* All mappings in the vmm vmspace must be
@@ -816,7 +713,7 @@ vm_iommu_unmap(struct vm *vm)
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
- vmspace_pmap(vm->vmspace), gpa))),
+ vmspace_pmap(vm_vmspace(vm)), gpa))),
("vm_iommu_unmap: vm %p gpa %jx not wired",
vm, (uintmax_t)gpa));
iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
@@ -873,7 +770,7 @@ vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
-
+ /* Negative values represent VM control structure fields. */
if (reg >= VM_REG_LAST)
return (EINVAL);
@@ -885,6 +782,7 @@ vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
int error;
+ /* Negative values represent VM control structure fields. */
if (reg >= VM_REG_LAST)
return (EINVAL);
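
Note: the new comments in vm_get_register() and vm_set_register() explain why only the upper bound is checked: negative register numbers pass through to the backend as encodings of VM control structure fields. A sketch of how such an encoding can look, assuming a macro mirroring VMCS_IDENT() from intel/vmcs.h (the exact encoding is an assumption here):

/* Hypothetical mirror of the Intel backend's VMCS field encoding. */
#define EXAMPLE_VMCS_IDENT(encoding)	((encoding) | (int)0x80000000)

uint64_t guest_cr3;
int error;

/* 0x6802 is the architectural VMCS encoding of the guest CR3 field. */
error = vm_get_register(vcpu, EXAMPLE_VMCS_IDENT(0x6802), &guest_cr3);
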
@@ -993,6 +891,54 @@ save_guest_fpustate(struct vcpu *vcpu)
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
+/*
+ * Invoke the rendezvous function on the specified vcpu if applicable. Return
+ * true if the rendezvous is finished, false otherwise.
+ */
+static bool
+vm_rendezvous(struct vcpu *vcpu)
+{
+ struct vm *vm = vcpu->vm;
+ int vcpuid;
+
+ mtx_assert(&vcpu->vm->rendezvous_mtx, MA_OWNED);
+ KASSERT(vcpu->vm->rendezvous_func != NULL,
+ ("vm_rendezvous: no rendezvous pending"));
+
+ /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
+ CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus,
+ &vm->active_cpus);
+
+ vcpuid = vcpu->vcpuid;
+ if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
+ !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
+ VMM_CTR0(vcpu, "Calling rendezvous func");
+ (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
+ CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
+ }
+ if (CPU_CMP(&vm->rendezvous_req_cpus,
+ &vm->rendezvous_done_cpus) == 0) {
+ VMM_CTR0(vcpu, "Rendezvous completed");
+ CPU_ZERO(&vm->rendezvous_req_cpus);
+ vm->rendezvous_func = NULL;
+ wakeup(&vm->rendezvous_func);
+ return (true);
+ }
+ return (false);
+}
+
+static void
+vcpu_wait_idle(struct vcpu *vcpu)
+{
+ KASSERT(vcpu->state != VCPU_IDLE, ("vcpu already idle"));
+
+ vcpu->reqidle = 1;
+ vcpu_notify_event_locked(vcpu);
+ VMM_CTR1(vcpu, "vcpu state change from %s to "
+ "idle requested", vcpu_state2str(vcpu->state));
+ msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
+}
+
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
bool from_idle)
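
Note: vm_rendezvous() factors the body of the old vm_handle_rendezvous() loop into a reusable helper so that vcpu_set_state_all() below can run a pending callback on behalf of idle vCPUs. A simplified sketch of the protocol it implements, omitting the thread-suspension check kept in vm_handle_rendezvous():

/*
 * Under rendezvous_mtx: each targeted vCPU runs the callback once and
 * marks itself done; the last one clears the request, wakes sleepers,
 * and vm_rendezvous() returns true.
 */
mtx_lock(&vm->rendezvous_mtx);
while (vm->rendezvous_func != NULL) {
	if (vm_rendezvous(vcpu))
		break;
	mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0, "vmrndv", hz);
}
mtx_unlock(&vm->rendezvous_mtx);
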
@@ -1007,13 +953,8 @@ vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
* ioctl() operating on a vcpu at any point.
*/
if (from_idle) {
- while (vcpu->state != VCPU_IDLE) {
- vcpu->reqidle = 1;
- vcpu_notify_event_locked(vcpu, false);
- VMM_CTR1(vcpu, "vcpu state change from %s to "
- "idle requested", vcpu_state2str(vcpu->state));
- msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
- }
+ while (vcpu->state != VCPU_IDLE)
+ vcpu_wait_idle(vcpu);
} else {
KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
"vcpu idle state"));
@@ -1065,6 +1006,95 @@ vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
return (0);
}
+/*
+ * Try to lock all of the vCPUs in the VM while taking care to avoid deadlocks
+ * with vm_smp_rendezvous().
+ *
+ * The complexity here suggests that the rendezvous mechanism needs a rethink.
+ */
+int
+vcpu_set_state_all(struct vm *vm, enum vcpu_state newstate)
+{
+ cpuset_t locked;
+ struct vcpu *vcpu;
+ int error, i;
+ uint16_t maxcpus;
+
+ KASSERT(newstate != VCPU_IDLE,
+ ("vcpu_set_state_all: invalid target state %d", newstate));
+
+ error = 0;
+ CPU_ZERO(&locked);
+ maxcpus = vm->maxcpus;
+
+ mtx_lock(&vm->rendezvous_mtx);
+restart:
+ if (vm->rendezvous_func != NULL) {
+ /*
+ * If we have a pending rendezvous, then the initiator may be
+ * blocked waiting for other vCPUs to execute the callback. The
+ * current thread may be a vCPU thread so we must not block
+ * waiting for the initiator, otherwise we get a deadlock.
+ * Thus, execute the callback on behalf of any idle vCPUs.
+ */
+ for (i = 0; i < maxcpus; i++) {
+ vcpu = vm_vcpu(vm, i);
+ if (vcpu == NULL)
+ continue;
+ vcpu_lock(vcpu);
+ if (vcpu->state == VCPU_IDLE) {
+ (void)vcpu_set_state_locked(vcpu, VCPU_FROZEN,
+ true);
+ CPU_SET(i, &locked);
+ }
+ if (CPU_ISSET(i, &locked)) {
+ /*
+ * We can safely execute the callback on this
+ * vCPU's behalf.
+ */
+ vcpu_unlock(vcpu);
+ (void)vm_rendezvous(vcpu);
+ vcpu_lock(vcpu);
+ }
+ vcpu_unlock(vcpu);
+ }
+ }
+
+ /*
+ * Now wait for remaining vCPUs to become idle. This may include the
+ * initiator of a rendezvous that is currently blocked on the rendezvous
+ * mutex.
+ */
+ CPU_FOREACH_ISCLR(i, &locked) {
+ if (i >= maxcpus)
+ break;
+ vcpu = vm_vcpu(vm, i);
+ if (vcpu == NULL)
+ continue;
+ vcpu_lock(vcpu);
+ while (vcpu->state != VCPU_IDLE) {
+ mtx_unlock(&vm->rendezvous_mtx);
+ vcpu_wait_idle(vcpu);
+ vcpu_unlock(vcpu);
+ mtx_lock(&vm->rendezvous_mtx);
+ if (vm->rendezvous_func != NULL)
+ goto restart;
+ vcpu_lock(vcpu);
+ }
+ error = vcpu_set_state_locked(vcpu, newstate, true);
+ vcpu_unlock(vcpu);
+ if (error != 0) {
+ /* Roll back state changes. */
+ CPU_FOREACH_ISSET(i, &locked)
+ (void)vcpu_set_state(vcpu, VCPU_IDLE, false);
+ break;
+ }
+ CPU_SET(i, &locked);
+ }
+ mtx_unlock(&vm->rendezvous_mtx);
+ return (error);
+}
+
static void
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
{
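
Note: vcpu_set_state_all() gives ioctl handlers a way to freeze every vCPU without deadlocking against an in-flight rendezvous. A hypothetical caller, assuming the common vmm_dev code uses it to implement VMMDEV_IOCTL_LOCK_ALL_VCPUS:

error = vcpu_set_state_all(vm, VCPU_FROZEN);
if (error != 0)
	return (error);

/* ... operate on the VM with all vCPUs quiesced ... */

for (int i = 0; i < vm_get_maxcpus(vm); i++) {
	struct vcpu *v = vm_vcpu(vm, i);

	if (v != NULL)
		(void)vcpu_set_state(v, VCPU_IDLE, false);
}
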
@@ -1086,36 +1116,23 @@ vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
static int
vm_handle_rendezvous(struct vcpu *vcpu)
{
- struct vm *vm = vcpu->vm;
+ struct vm *vm;
struct thread *td;
- int error, vcpuid;
- error = 0;
- vcpuid = vcpu->vcpuid;
td = curthread;
+ vm = vcpu->vm;
+
mtx_lock(&vm->rendezvous_mtx);
while (vm->rendezvous_func != NULL) {
- /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
- CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
-
- if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
- !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
- VMM_CTR0(vcpu, "Calling rendezvous func");
- (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
- CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
- }
- if (CPU_CMP(&vm->rendezvous_req_cpus,
- &vm->rendezvous_done_cpus) == 0) {
- VMM_CTR0(vcpu, "Rendezvous completed");
- CPU_ZERO(&vm->rendezvous_req_cpus);
- vm->rendezvous_func = NULL;
- wakeup(&vm->rendezvous_func);
+ if (vm_rendezvous(vcpu))
break;
- }
+
VMM_CTR0(vcpu, "Wait for rendezvous completion");
mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
"vmrndv", hz);
if (td_ast_pending(td, TDA_SUSPEND)) {
+ int error;
+
mtx_unlock(&vm->rendezvous_mtx);
error = thread_check_susp(td, true);
if (error != 0)
@@ -1249,7 +1266,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
("vm_handle_paging: invalid fault_type %d", ftype));
if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
- rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
+ rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm_vmspace(vm)),
vme->u.paging.gpa, ftype);
if (rv == 0) {
VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
@@ -1259,7 +1276,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
}
}
- map = &vm->vmspace->vm_map;
+ map = &vm_vmspace(vm)->vm_map;
rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);
VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
@@ -1397,7 +1414,7 @@ vm_handle_suspend(struct vcpu *vcpu, bool *retu)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->suspended_cpus)) {
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
}
@@ -1471,7 +1488,7 @@ vm_suspend(struct vm *vm, enum vm_suspend_how how)
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
return (0);
@@ -1560,7 +1577,7 @@ vm_run(struct vcpu *vcpu)
if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
return (EINVAL);
- pmap = vmspace_pmap(vm->vmspace);
+ pmap = vmspace_pmap(vm_vmspace(vm));
vme = &vcpu->exitinfo;
evinfo.rptr = &vm->rendezvous_req_cpus;
evinfo.sptr = &vm->suspend;
@@ -1951,7 +1968,7 @@ vm_inject_nmi(struct vcpu *vcpu)
{
vcpu->nmi_pending = 1;
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
return (0);
}
@@ -1978,7 +1995,7 @@ vm_inject_extint(struct vcpu *vcpu)
{
vcpu->extint_pending = 1;
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
return (0);
}
@@ -2149,14 +2166,14 @@ vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
vm->debug_cpus = vm->active_cpus;
for (int i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &vm->active_cpus))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
} else {
if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
return (EINVAL);
CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
- vcpu_notify_event(vcpu, false);
+ vcpu_notify_event(vcpu);
}
return (0);
}
@@ -2264,7 +2281,7 @@ vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
* to the host_cpu to cause the vcpu to trap into the hypervisor.
*/
static void
-vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event_locked(struct vcpu *vcpu)
{
int hostcpu;
@@ -2272,12 +2289,7 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
if (vcpu->state == VCPU_RUNNING) {
KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
if (hostcpu != curcpu) {
- if (lapic_intr) {
- vlapic_post_intr(vcpu->vlapic, hostcpu,
- vmm_ipinum);
- } else {
- ipi_cpu(hostcpu, vmm_ipinum);
- }
+ ipi_cpu(hostcpu, vmm_ipinum);
} else {
/*
* If the 'vcpu' is running on 'curcpu' then it must
@@ -2295,17 +2307,22 @@ vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
}
void
-vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
+vcpu_notify_event(struct vcpu *vcpu)
{
vcpu_lock(vcpu);
- vcpu_notify_event_locked(vcpu, lapic_intr);
+ vcpu_notify_event_locked(vcpu);
vcpu_unlock(vcpu);
}
-struct vmspace *
-vm_vmspace(struct vm *vm)
+void
+vcpu_notify_lapic(struct vcpu *vcpu)
{
- return (vm->vmspace);
+ vcpu_lock(vcpu);
+ if (vcpu->state == VCPU_RUNNING && vcpu->hostcpu != curcpu)
+ vlapic_post_intr(vcpu->vlapic, vcpu->hostcpu, vmm_ipinum);
+ else
+ vcpu_notify_event_locked(vcpu);
+ vcpu_unlock(vcpu);
}
struct vm_mem *
@@ -2366,7 +2383,7 @@ restart:
*/
for (i = 0; i < vm->maxcpus; i++) {
if (CPU_ISSET(i, &dest))
- vcpu_notify_event(vm_vcpu(vm, i), false);
+ vcpu_notify_event(vm_vcpu(vm, i));
}
return (vm_handle_rendezvous(vcpu));
@@ -2519,7 +2536,7 @@ vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
- vmspace_resident_count(vcpu->vm->vmspace));
+ vmspace_resident_count(vm_vmspace(vcpu->vm)));
}
}
@@ -2529,7 +2546,7 @@ vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
if (vcpu->vcpuid == 0) {
vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
- pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
+ pmap_wired_count(vmspace_pmap(vm_vmspace(vcpu->vm))));
}
}
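
Note: with the vmspace pointer removed from the amd64 struct vm, every former vm->vmspace dereference above goes through vm_vmspace(), whose definition also leaves this file. A sketch of the accessor this change assumes, with the generic vm_mem layer owning the address space (the field name is hypothetical):

struct vmspace *
vm_vmspace(struct vm *vm)
{
	return (vm_mem(vm)->mem_vmspace);	/* hypothetical field name */
}
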
diff --git a/sys/amd64/vmm/vmm_dev_machdep.c b/sys/amd64/vmm/vmm_dev_machdep.c
index d8d2b460404c..b84be809ea24 100644
--- a/sys/amd64/vmm/vmm_dev_machdep.c
+++ b/sys/amd64/vmm/vmm_dev_machdep.c
@@ -48,6 +48,7 @@
#include <x86/apicreg.h>
#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_stat.h>
#include "vmm_lapic.h"
@@ -123,12 +124,16 @@ const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
VMMDEV_IOCTL(VM_SET_KERNEMU_DEV, VMMDEV_IOCTL_LOCK_ONE_VCPU),
VMMDEV_IOCTL(VM_BIND_PPTDEV,
- VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS |
+ VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
VMMDEV_IOCTL(VM_UNBIND_PPTDEV,
- VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS |
+ VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
- VMMDEV_IOCTL(VM_MAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
- VMMDEV_IOCTL(VM_UNMAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL(VM_MAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS |
+ VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
+ VMMDEV_IOCTL(VM_UNMAP_PPTDEV_MMIO, VMMDEV_IOCTL_LOCK_ALL_VCPUS |
+ VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
#ifdef BHYVE_SNAPSHOT
#ifdef COMPAT_FREEBSD13
VMMDEV_IOCTL(VM_SNAPSHOT_REQ_13, VMMDEV_IOCTL_LOCK_ALL_VCPUS),
@@ -146,9 +151,9 @@ const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
VMMDEV_IOCTL(VM_LAPIC_LOCAL_IRQ, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),
- VMMDEV_IOCTL(VM_PPTDEV_MSI, 0),
- VMMDEV_IOCTL(VM_PPTDEV_MSIX, 0),
- VMMDEV_IOCTL(VM_PPTDEV_DISABLE_MSIX, 0),
+ VMMDEV_IOCTL(VM_PPTDEV_MSI, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
+ VMMDEV_IOCTL(VM_PPTDEV_MSIX, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
+ VMMDEV_IOCTL(VM_PPTDEV_DISABLE_MSIX, VMMDEV_IOCTL_PRIV_CHECK_DRIVER),
VMMDEV_IOCTL(VM_LAPIC_MSI, 0),
VMMDEV_IOCTL(VM_IOAPIC_ASSERT_IRQ, 0),
VMMDEV_IOCTL(VM_IOAPIC_DEASSERT_IRQ, 0),
@@ -171,40 +176,13 @@ int
vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
int fflag, struct thread *td)
{
- struct vm_seg_desc *vmsegdesc;
- struct vm_run *vmrun;
-#ifdef COMPAT_FREEBSD13
- struct vm_run_13 *vmrun_13;
-#endif
- struct vm_exception *vmexc;
- struct vm_lapic_irq *vmirq;
- struct vm_lapic_msi *vmmsi;
- struct vm_ioapic_irq *ioapic_irq;
- struct vm_isa_irq *isa_irq;
- struct vm_isa_irq_trigger *isa_irq_trigger;
- struct vm_pptdev *pptdev;
- struct vm_pptdev_mmio *pptmmio;
- struct vm_pptdev_msi *pptmsi;
- struct vm_pptdev_msix *pptmsix;
- struct vm_x2apic *x2apic;
- struct vm_gpa_pte *gpapte;
- struct vm_gla2gpa *gg;
- struct vm_intinfo *vmii;
- struct vm_rtc_time *rtctime;
- struct vm_rtc_data *rtcdata;
- struct vm_readwrite_kernemu_device *kernemu;
-#ifdef BHYVE_SNAPSHOT
- struct vm_snapshot_meta *snapshot_meta;
-#ifdef COMPAT_FREEBSD13
- struct vm_snapshot_meta_13 *snapshot_13;
-#endif
-#endif
int error;
error = 0;
switch (cmd) {
case VM_RUN: {
struct vm_exit *vme;
+ struct vm_run *vmrun;
vmrun = (struct vm_run *)data;
vme = vm_exitinfo(vcpu);
@@ -242,6 +220,7 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
case VM_RUN_13: {
struct vm_exit *vme;
struct vm_exit_13 *vme_13;
+ struct vm_run_13 *vmrun_13;
vmrun_13 = (struct vm_run_13 *)data;
vme_13 = &vmrun_13->vm_exit;
@@ -280,85 +259,123 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
break;
}
#endif
- case VM_PPTDEV_MSI:
+ case VM_PPTDEV_MSI: {
+ struct vm_pptdev_msi *pptmsi;
+
pptmsi = (struct vm_pptdev_msi *)data;
- error = ppt_setup_msi(vm,
- pptmsi->bus, pptmsi->slot, pptmsi->func,
- pptmsi->addr, pptmsi->msg,
- pptmsi->numvec);
+ error = ppt_setup_msi(vm, pptmsi->bus, pptmsi->slot,
+ pptmsi->func, pptmsi->addr, pptmsi->msg, pptmsi->numvec);
break;
- case VM_PPTDEV_MSIX:
+ }
+ case VM_PPTDEV_MSIX: {
+ struct vm_pptdev_msix *pptmsix;
+
pptmsix = (struct vm_pptdev_msix *)data;
- error = ppt_setup_msix(vm,
- pptmsix->bus, pptmsix->slot,
- pptmsix->func, pptmsix->idx,
- pptmsix->addr, pptmsix->msg,
- pptmsix->vector_control);
+ error = ppt_setup_msix(vm, pptmsix->bus, pptmsix->slot,
+ pptmsix->func, pptmsix->idx, pptmsix->addr, pptmsix->msg,
+ pptmsix->vector_control);
break;
- case VM_PPTDEV_DISABLE_MSIX:
+ }
+ case VM_PPTDEV_DISABLE_MSIX: {
+ struct vm_pptdev *pptdev;
+
pptdev = (struct vm_pptdev *)data;
error = ppt_disable_msix(vm, pptdev->bus, pptdev->slot,
- pptdev->func);
+ pptdev->func);
break;
- case VM_MAP_PPTDEV_MMIO:
+ }
+ case VM_MAP_PPTDEV_MMIO: {
+ struct vm_pptdev_mmio *pptmmio;
+
pptmmio = (struct vm_pptdev_mmio *)data;
error = ppt_map_mmio(vm, pptmmio->bus, pptmmio->slot,
- pptmmio->func, pptmmio->gpa, pptmmio->len,
- pptmmio->hpa);
+ pptmmio->func, pptmmio->gpa, pptmmio->len, pptmmio->hpa);
break;
- case VM_UNMAP_PPTDEV_MMIO:
+ }
+ case VM_UNMAP_PPTDEV_MMIO: {
+ struct vm_pptdev_mmio *pptmmio;
+
pptmmio = (struct vm_pptdev_mmio *)data;
error = ppt_unmap_mmio(vm, pptmmio->bus, pptmmio->slot,
- pptmmio->func, pptmmio->gpa, pptmmio->len);
+ pptmmio->func, pptmmio->gpa, pptmmio->len);
break;
- case VM_BIND_PPTDEV:
+ }
+ case VM_BIND_PPTDEV: {
+ struct vm_pptdev *pptdev;
+
pptdev = (struct vm_pptdev *)data;
error = vm_assign_pptdev(vm, pptdev->bus, pptdev->slot,
- pptdev->func);
+ pptdev->func);
break;
- case VM_UNBIND_PPTDEV:
+ }
+ case VM_UNBIND_PPTDEV: {
+ struct vm_pptdev *pptdev;
+
pptdev = (struct vm_pptdev *)data;
error = vm_unassign_pptdev(vm, pptdev->bus, pptdev->slot,
- pptdev->func);
+ pptdev->func);
break;
- case VM_INJECT_EXCEPTION:
+ }
+ case VM_INJECT_EXCEPTION: {
+ struct vm_exception *vmexc;
+
vmexc = (struct vm_exception *)data;
error = vm_inject_exception(vcpu,
vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
vmexc->restart_instruction);
break;
+ }
case VM_INJECT_NMI:
error = vm_inject_nmi(vcpu);
break;
- case VM_LAPIC_IRQ:
+ case VM_LAPIC_IRQ: {
+ struct vm_lapic_irq *vmirq;
+
vmirq = (struct vm_lapic_irq *)data;
error = lapic_intr_edge(vcpu, vmirq->vector);
break;
- case VM_LAPIC_LOCAL_IRQ:
+ }
+ case VM_LAPIC_LOCAL_IRQ: {
+ struct vm_lapic_irq *vmirq;
+
vmirq = (struct vm_lapic_irq *)data;
error = lapic_set_local_intr(vm, vcpu, vmirq->vector);
break;
- case VM_LAPIC_MSI:
+ }
+ case VM_LAPIC_MSI: {
+ struct vm_lapic_msi *vmmsi;
+
vmmsi = (struct vm_lapic_msi *)data;
error = lapic_intr_msi(vm, vmmsi->addr, vmmsi->msg);
break;
- case VM_IOAPIC_ASSERT_IRQ:
+ }
+ case VM_IOAPIC_ASSERT_IRQ: {
+ struct vm_ioapic_irq *ioapic_irq;
+
ioapic_irq = (struct vm_ioapic_irq *)data;
error = vioapic_assert_irq(vm, ioapic_irq->irq);
break;
- case VM_IOAPIC_DEASSERT_IRQ:
+ }
+ case VM_IOAPIC_DEASSERT_IRQ: {
+ struct vm_ioapic_irq *ioapic_irq;
+
ioapic_irq = (struct vm_ioapic_irq *)data;
error = vioapic_deassert_irq(vm, ioapic_irq->irq);
break;
- case VM_IOAPIC_PULSE_IRQ:
+ }
+ case VM_IOAPIC_PULSE_IRQ: {
+ struct vm_ioapic_irq *ioapic_irq;
+
ioapic_irq = (struct vm_ioapic_irq *)data;
error = vioapic_pulse_irq(vm, ioapic_irq->irq);
break;
+ }
case VM_IOAPIC_PINCOUNT:
*(int *)data = vioapic_pincount(vm);
break;
case VM_SET_KERNEMU_DEV:
case VM_GET_KERNEMU_DEV: {
+ struct vm_readwrite_kernemu_device *kernemu;
mem_region_write_t mwrite;
mem_region_read_t mread;
int size;
@@ -395,60 +412,86 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
error = mread(vcpu, kernemu->gpa,
&kernemu->value, size, &arg);
break;
- }
- case VM_ISA_ASSERT_IRQ:
+ }
+ case VM_ISA_ASSERT_IRQ: {
+ struct vm_isa_irq *isa_irq;
+
isa_irq = (struct vm_isa_irq *)data;
error = vatpic_assert_irq(vm, isa_irq->atpic_irq);
if (error == 0 && isa_irq->ioapic_irq != -1)
error = vioapic_assert_irq(vm, isa_irq->ioapic_irq);
break;
- case VM_ISA_DEASSERT_IRQ:
+ }
+ case VM_ISA_DEASSERT_IRQ: {
+ struct vm_isa_irq *isa_irq;
+
isa_irq = (struct vm_isa_irq *)data;
error = vatpic_deassert_irq(vm, isa_irq->atpic_irq);
if (error == 0 && isa_irq->ioapic_irq != -1)
error = vioapic_deassert_irq(vm, isa_irq->ioapic_irq);
break;
- case VM_ISA_PULSE_IRQ:
+ }
+ case VM_ISA_PULSE_IRQ: {
+ struct vm_isa_irq *isa_irq;
+
isa_irq = (struct vm_isa_irq *)data;
error = vatpic_pulse_irq(vm, isa_irq->atpic_irq);
if (error == 0 && isa_irq->ioapic_irq != -1)
error = vioapic_pulse_irq(vm, isa_irq->ioapic_irq);
break;
- case VM_ISA_SET_IRQ_TRIGGER:
+ }
+ case VM_ISA_SET_IRQ_TRIGGER: {
+ struct vm_isa_irq_trigger *isa_irq_trigger;
+
isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
error = vatpic_set_irq_trigger(vm,
isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
break;
- case VM_SET_SEGMENT_DESCRIPTOR:
+ }
+ case VM_SET_SEGMENT_DESCRIPTOR: {
+ struct vm_seg_desc *vmsegdesc;
+
vmsegdesc = (struct vm_seg_desc *)data;
- error = vm_set_seg_desc(vcpu,
- vmsegdesc->regnum,
- &vmsegdesc->desc);
+ error = vm_set_seg_desc(vcpu, vmsegdesc->regnum,
+ &vmsegdesc->desc);
break;
- case VM_GET_SEGMENT_DESCRIPTOR:
+ }
+ case VM_GET_SEGMENT_DESCRIPTOR: {
+ struct vm_seg_desc *vmsegdesc;
+
vmsegdesc = (struct vm_seg_desc *)data;
- error = vm_get_seg_desc(vcpu,
- vmsegdesc->regnum,
- &vmsegdesc->desc);
+ error = vm_get_seg_desc(vcpu, vmsegdesc->regnum,
+ &vmsegdesc->desc);
break;
- case VM_SET_X2APIC_STATE:
+ }
+ case VM_SET_X2APIC_STATE: {
+ struct vm_x2apic *x2apic;
+
x2apic = (struct vm_x2apic *)data;
error = vm_set_x2apic_state(vcpu, x2apic->state);
break;
- case VM_GET_X2APIC_STATE:
+ }
+ case VM_GET_X2APIC_STATE: {
+ struct vm_x2apic *x2apic;
+
x2apic = (struct vm_x2apic *)data;
error = vm_get_x2apic_state(vcpu, &x2apic->state);
break;
- case VM_GET_GPA_PMAP:
+ }
+ case VM_GET_GPA_PMAP: {
+ struct vm_gpa_pte *gpapte;
+
gpapte = (struct vm_gpa_pte *)data;
- pmap_get_mapping(vmspace_pmap(vm_vmspace(vm)),
- gpapte->gpa, gpapte->pte, &gpapte->ptenum);
- error = 0;
+ pmap_get_mapping(vmspace_pmap(vm_vmspace(vm)), gpapte->gpa,
+ gpapte->pte, &gpapte->ptenum);
break;
+ }
case VM_GET_HPET_CAPABILITIES:
error = vhpet_getcap((struct vm_hpet_cap *)data);
break;
case VM_GLA2GPA: {
+ struct vm_gla2gpa *gg;
+
CTASSERT(PROT_READ == VM_PROT_READ);
CTASSERT(PROT_WRITE == VM_PROT_WRITE);
CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
@@ -459,50 +502,76 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
("%s: vm_gla2gpa unknown error %d", __func__, error));
break;
}
- case VM_GLA2GPA_NOFAULT:
+ case VM_GLA2GPA_NOFAULT: {
+ struct vm_gla2gpa *gg;
+
gg = (struct vm_gla2gpa *)data;
error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
gg->prot, &gg->gpa, &gg->fault);
KASSERT(error == 0 || error == EFAULT,
("%s: vm_gla2gpa unknown error %d", __func__, error));
break;
- case VM_SET_INTINFO:
+ }
+ case VM_SET_INTINFO: {
+ struct vm_intinfo *vmii;
+
vmii = (struct vm_intinfo *)data;
error = vm_exit_intinfo(vcpu, vmii->info1);
break;
- case VM_GET_INTINFO:
+ }
+ case VM_GET_INTINFO: {
+ struct vm_intinfo *vmii;
+
vmii = (struct vm_intinfo *)data;
error = vm_get_intinfo(vcpu, &vmii->info1, &vmii->info2);
break;
- case VM_RTC_WRITE:
+ }
+ case VM_RTC_WRITE: {
+ struct vm_rtc_data *rtcdata;
+
rtcdata = (struct vm_rtc_data *)data;
error = vrtc_nvram_write(vm, rtcdata->offset,
rtcdata->value);
break;
- case VM_RTC_READ:
+ }
+ case VM_RTC_READ: {
+ struct vm_rtc_data *rtcdata;
+
rtcdata = (struct vm_rtc_data *)data;
error = vrtc_nvram_read(vm, rtcdata->offset,
&rtcdata->value);
break;
- case VM_RTC_SETTIME:
+ }
+ case VM_RTC_SETTIME: {
+ struct vm_rtc_time *rtctime;
+
rtctime = (struct vm_rtc_time *)data;
error = vrtc_set_time(vm, rtctime->secs);
break;
- case VM_RTC_GETTIME:
- error = 0;
+ }
+ case VM_RTC_GETTIME: {
+ struct vm_rtc_time *rtctime;
+
rtctime = (struct vm_rtc_time *)data;
rtctime->secs = vrtc_get_time(vm);
break;
+ }
case VM_RESTART_INSTRUCTION:
error = vm_restart_instruction(vcpu);
break;
#ifdef BHYVE_SNAPSHOT
- case VM_SNAPSHOT_REQ:
+ case VM_SNAPSHOT_REQ: {
+ struct vm_snapshot_meta *snapshot_meta;
+
snapshot_meta = (struct vm_snapshot_meta *)data;
error = vm_snapshot_req(vm, snapshot_meta);
break;
+ }
#ifdef COMPAT_FREEBSD13
- case VM_SNAPSHOT_REQ_13:
+ case VM_SNAPSHOT_REQ_13: {
+ struct vm_snapshot_meta *snapshot_meta;
+ struct vm_snapshot_meta_13 *snapshot_13;
+
/*
* The old structure just has an additional pointer at
* the start that is ignored.
@@ -512,6 +581,7 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
(struct vm_snapshot_meta *)&snapshot_13->dev_data;
error = vm_snapshot_req(vm, snapshot_meta);
break;
+ }
#endif
case VM_RESTORE_TIME:
error = vm_restore_time(vm);
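
Note: the vmmdev_machdep_ioctl() rework above replaces one function-wide block of casted locals with per-case declarations, so each ioctl's payload type sits next to the code that uses it. The idiom, with hypothetical names:

switch (cmd) {
case VM_EXAMPLE_IOCTL: {		/* hypothetical ioctl */
	struct vm_example *arg;		/* hypothetical payload type */

	arg = (struct vm_example *)data;
	error = vm_example_op(vm, arg->field);
	break;
}
default:
	error = ENOTTY;
	break;
}
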
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
index 0cae01f172ec..63bdee69bb59 100644
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -61,7 +61,7 @@ lapic_set_intr(struct vcpu *vcpu, int vector, bool level)
vlapic = vm_lapic(vcpu);
if (vlapic_set_intr_ready(vlapic, vector, level))
- vcpu_notify_event(vcpu, true);
+ vcpu_notify_lapic(vcpu);
return (0);
}
diff --git a/sys/amd64/vmm/vmm_mem.h b/sys/amd64/vmm/vmm_mem.h
index 41b9bf07c4fc..d905fd37001d 100644
--- a/sys/amd64/vmm/vmm_mem.h
+++ b/sys/amd64/vmm/vmm_mem.h
@@ -30,10 +30,9 @@
#define _VMM_MEM_H_
struct vmspace;
-struct vm_object;
-struct vm_object *vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
- vm_paddr_t hpa);
+int vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
+ vm_paddr_t hpa);
void vmm_mmio_free(struct vmspace *, vm_paddr_t gpa, size_t size);
vm_paddr_t vmm_mem_maxaddr(void);
diff --git a/sys/amd64/vmm/vmm_mem_machdep.c b/sys/amd64/vmm/vmm_mem_machdep.c
index e96c9e4bdc66..afb3a0274e2a 100644
--- a/sys/amd64/vmm/vmm_mem_machdep.c
+++ b/sys/amd64/vmm/vmm_mem_machdep.c
@@ -36,6 +36,7 @@
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
+#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
@@ -45,40 +46,48 @@
#include "vmm_mem.h"
-vm_object_t
+int
vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
- vm_paddr_t hpa)
+ vm_paddr_t hpa)
{
- int error;
- vm_object_t obj;
struct sglist *sg;
+ vm_object_t obj;
+ int error;
+
+ if (gpa + len < gpa || hpa + len < hpa || (gpa & PAGE_MASK) != 0 ||
+ (hpa & PAGE_MASK) != 0 || (len & PAGE_MASK) != 0)
+ return (EINVAL);
sg = sglist_alloc(1, M_WAITOK);
error = sglist_append_phys(sg, hpa, len);
KASSERT(error == 0, ("error %d appending physaddr to sglist", error));
obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
- if (obj != NULL) {
- /*
- * VT-x ignores the MTRR settings when figuring out the
- * memory type for translations obtained through EPT.
- *
- * Therefore we explicitly force the pages provided by
- * this object to be mapped as uncacheable.
- */
- VM_OBJECT_WLOCK(obj);
- error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
- VM_OBJECT_WUNLOCK(obj);
- if (error != KERN_SUCCESS) {
- panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
- error);
- }
- error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
- VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
- if (error != KERN_SUCCESS) {
- vm_object_deallocate(obj);
- obj = NULL;
- }
+ if (obj == NULL)
+ return (ENOMEM);
+
+ /*
+ * VT-x ignores the MTRR settings when figuring out the memory type for
+ * translations obtained through EPT.
+ *
+ * Therefore we explicitly force the pages provided by this object to be
+ * mapped as uncacheable.
+ */
+ VM_OBJECT_WLOCK(obj);
+ error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
+ VM_OBJECT_WUNLOCK(obj);
+ if (error != KERN_SUCCESS)
+ panic("vmm_mmio_alloc: vm_object_set_memattr error %d", error);
+
+ vm_map_lock(&vmspace->vm_map);
+ error = vm_map_insert(&vmspace->vm_map, obj, 0, gpa, gpa + len,
+ VM_PROT_RW, VM_PROT_RW, 0);
+ vm_map_unlock(&vmspace->vm_map);
+ if (error != KERN_SUCCESS) {
+ error = vm_mmap_to_errno(error);
+ vm_object_deallocate(obj);
+ } else {
+ error = 0;
}
/*
@@ -94,7 +103,7 @@ vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
*/
sglist_free(sg);
- return (obj);
+ return (error);
}
void
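
Note: vmm_mmio_alloc() now reports an errno instead of returning the backing vm_object_t, rejects unaligned or overflowing gpa/hpa/len up front, and installs the mapping at a fixed guest physical address with vm_map_insert() rather than probing with vm_map_find(VMFS_NO_SPACE). A minimal caller under the new contract, with example_map_bar() as a hypothetical wrapper:

int
example_map_bar(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	/* Alignment is also checked inside vmm_mmio_alloc(). */
	if (((gpa | hpa | len) & PAGE_MASK) != 0)
		return (EINVAL);
	return (vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa));
}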