author    Robert Wing <rew@FreeBSD.org>    2023-01-20 11:10:53 +0000
committer Robert Wing <rew@FreeBSD.org>    2023-01-20 11:10:53 +0000
commit    c668e8173a8fc047b54a5c51b0fe4637e87836b6 (patch)
tree      6e85c0e66bc5966f1488fb3adecfc6191c2cc01f
parent    ccf32a68f821c5c724fb9a5b4b9576925122292f (diff)
vmm: take exclusive mem_segs_lock in vm_cleanup()
The consumers of vm_cleanup() are vm_reinit() and vm_destroy().

The vm_reinit() call path, where vmmdev_ioctl() takes mem_segs_lock:

    vmmdev_ioctl()
        vm_reinit()
            vm_cleanup(destroy=false)

The call path for vm_destroy(), where mem_segs_lock is not taken:

    sysctl_vmm_destroy()
        vmmdev_destroy()
            vm_destroy()
                vm_cleanup(destroy=true)

Fix this by taking mem_segs_lock in vm_cleanup() when destroy == true.

Reviewed by:	corvink, markj, jhb
Fixes:		67b69e76e8ee ("vmm: Use an sx lock to protect the memory map.")
Differential Revision:	https://reviews.freebsd.org/D38071
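For context, a minimal sketch of the two call paths and their lock state, assuming vm_xlock_memsegs()/vm_unlock_memsegs() wrap an exclusive sx(9) lock as introduced by 67b69e76e8ee; the wrapper function names reinit_path() and destroy_path() below are illustrative only, not the actual vmmdev(4) entry points:

/* Illustrative sketch only -- not the actual vmmdev(4) code. */
static void
reinit_path(struct vm *vm)
{
	/* vmmdev_ioctl() already takes the lock around vm_reinit(). */
	vm_xlock_memsegs(vm);
	vm_reinit(vm);			/* -> vm_cleanup(vm, false) */
	vm_unlock_memsegs(vm);
}

static void
destroy_path(struct vm *vm)
{
	/*
	 * No caller holds mem_segs_lock on this path, so
	 * vm_cleanup(vm, true) must acquire it itself before
	 * freeing the memory segments.
	 */
	vm_destroy(vm);			/* -> vm_cleanup(vm, true) */
}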
-rw-r--r--	sys/amd64/vmm/vmm.c	4
1 file changed, 4 insertions, 0 deletions
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 169109e8df6e..24f97a9244f0 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -651,6 +651,9 @@ vm_cleanup(struct vm *vm, bool destroy)
 	struct mem_map *mm;
 	int i;
 
+	if (destroy)
+		vm_xlock_memsegs(vm);
+
 	ppt_unassign_all(vm);
 
 	if (vm->iommu != NULL)
@@ -690,6 +693,7 @@ vm_cleanup(struct vm *vm, bool destroy)
 	if (destroy) {
 		for (i = 0; i < VM_MAX_MEMSEGS; i++)
 			vm_free_memseg(vm, i);
+		vm_unlock_memsegs(vm);
 
 		vmmops_vmspace_free(vm->vmspace);
 		vm->vmspace = NULL;
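
For reference, a sketch of what the memsegs lock helpers are assumed to look like, based on the Fixes commit 67b69e76e8ee ("vmm: Use an sx lock to protect the memory map."); not copied verbatim from vmm.c:

#include <sys/param.h>
#include <sys/sx.h>

/* Assumed shape of the lock and its helpers; illustrative only. */
struct vm {
	struct sx	mem_segs_lock;	/* protects the memory map */
	/* ... */
};

void
vm_xlock_memsegs(struct vm *vm)
{
	sx_xlock(&vm->mem_segs_lock);
}

void
vm_unlock_memsegs(struct vm *vm)
{
	sx_xunlock(&vm->mem_segs_lock);
}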