Diffstat (limited to 'sys/riscv')
 -rw-r--r--   sys/riscv/conf/std.allwinner        3
 -rw-r--r--   sys/riscv/conf/std.starfive         3
 -rw-r--r--   sys/riscv/include/atomic.h          3
 -rw-r--r--   sys/riscv/include/ieeefp.h          5
 -rw-r--r--   sys/riscv/include/kexec.h          39
 -rw-r--r--   sys/riscv/include/vmm.h            30
 -rw-r--r--   sys/riscv/include/vmm_dev.h         2
 -rw-r--r--   sys/riscv/riscv/elf_machdep.c       4
 -rw-r--r--   sys/riscv/riscv/fpe.c              66
 -rw-r--r--   sys/riscv/starfive/jh7110_pcie.c   12
 -rw-r--r--   sys/riscv/vmm/riscv.h              23
 -rw-r--r--   sys/riscv/vmm/vmm.c               146
 -rw-r--r--   sys/riscv/vmm/vmm_aplic.c          10
 -rw-r--r--   sys/riscv/vmm/vmm_aplic.h           1
 -rw-r--r--   sys/riscv/vmm/vmm_dev_machdep.c    37
15 files changed, 178 insertions(+), 206 deletions(-)
diff --git a/sys/riscv/conf/std.allwinner b/sys/riscv/conf/std.allwinner
index 34fe195b01ba..ecd789f39963 100644
--- a/sys/riscv/conf/std.allwinner
+++ b/sys/riscv/conf/std.allwinner
@@ -17,4 +17,7 @@
 device		awg		# Allwinner EMAC Gigabit Ethernet
 device		musb		# Mentor Graphics USB OTG controller
 
+# DTBs
+makeoptions	MODULES_EXTRA+="dtb/allwinner"
+
 files	"../allwinner/files.allwinner"
diff --git a/sys/riscv/conf/std.starfive b/sys/riscv/conf/std.starfive
index 9bdb1af9e79c..6a0e56cc84bd 100644
--- a/sys/riscv/conf/std.starfive
+++ b/sys/riscv/conf/std.starfive
@@ -10,4 +10,7 @@
 device		eqos
 device		dwmmc
 device		dwmmc_starfive
+# DTBs
+makeoptions	MODULES_EXTRA+="dtb/starfive"
+
 files	"../starfive/files.starfive"
diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h
index 74ffc171b028..c90cb02c482c 100644
--- a/sys/riscv/include/atomic.h
+++ b/sys/riscv/include/atomic.h
@@ -656,4 +656,7 @@ atomic_thread_fence_seq_cst(void)
 
 #include <sys/_atomic_subword.h>
 
+#define	atomic_set_short	atomic_set_16
+#define	atomic_clear_short	atomic_clear_16
+
 #endif /* _MACHINE_ATOMIC_H_ */
diff --git a/sys/riscv/include/ieeefp.h b/sys/riscv/include/ieeefp.h
index 03a96e8a000f..84b554a04c65 100644
--- a/sys/riscv/include/ieeefp.h
+++ b/sys/riscv/include/ieeefp.h
@@ -5,4 +5,9 @@
 /* TODO */
 typedef int fp_except_t;
 
+__BEGIN_DECLS
+extern fp_except_t fpgetmask(void);
+extern fp_except_t fpsetmask(fp_except_t);
+__END_DECLS
+
 #endif /* _MACHINE_IEEEFP_H_ */
diff --git a/sys/riscv/include/kexec.h b/sys/riscv/include/kexec.h
new file mode 100644
index 000000000000..5fb6fd321989
--- /dev/null
+++ b/sys/riscv/include/kexec.h
@@ -0,0 +1,39 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RISCV_KEXEC_H_
+#define	_RISCV_KEXEC_H_
+
+int
+kexec_load_md(struct kexec_image *image)
+{
+	return (ENOSYS);
+}
+
+#define	kexec_reboot_md(x)	do {} while (0)
+#endif /* _RISCV_KEXEC_H_ */
diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h
index 1221521be368..361140834805 100644
--- a/sys/riscv/include/vmm.h
+++ b/sys/riscv/include/vmm.h
@@ -49,6 +49,7 @@ enum vm_suspend_how {
 	VM_SUSPEND_RESET,
 	VM_SUSPEND_POWEROFF,
 	VM_SUSPEND_HALT,
+	VM_SUSPEND_DESTROY,
 	VM_SUSPEND_LAST
 };
 
@@ -102,9 +103,6 @@ enum vm_reg_name {
 #define	VM_INTINFO_HWEXCEPTION	(3 << 8)
 #define	VM_INTINFO_SWINTR	(4 << 8)
 
-#define	VM_MAX_NAMELEN	32
-#define	VM_MAX_SUFFIXLEN 15
-
 #ifdef _KERNEL
 
 struct vm;
@@ -122,10 +120,33 @@ struct vm_eventinfo {
 	int	*iptr;		/* reqidle cookie */
 };
 
+#define	DECLARE_VMMOPS_FUNC(ret_type, opname, args)			\
+	ret_type vmmops_##opname args
+
+DECLARE_VMMOPS_FUNC(int, modinit, (void));
+DECLARE_VMMOPS_FUNC(int, modcleanup, (void));
+DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap));
+DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, uint64_t *gpa, int *is_fault));
+DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+    struct vm_eventinfo *info));
+DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi));
+DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+    int vcpu_id));
+DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui));
+DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t scause));
+DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval));
+DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val));
+DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval));
+DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val));
+DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+    vm_offset_t max));
+DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace));
+
 int vm_create(const char *name, struct vm **retvm);
 struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
 void vm_disable_vcpu_creation(struct vm *vm);
-void vm_slock_vcpus(struct vm *vm);
+void vm_lock_vcpus(struct vm *vm);
 void vm_unlock_vcpus(struct vm *vm);
 void vm_destroy(struct vm *vm);
 int vm_reinit(struct vm *vm);
@@ -211,7 +232,6 @@ vcpu_should_yield(struct vcpu *vcpu)
 
 void *vcpu_stats(struct vcpu *vcpu);
 void vcpu_notify_event(struct vcpu *vcpu);
-struct vmspace *vm_vmspace(struct vm *vm);
 struct vm_mem *vm_mem(struct vm *vm);
 enum vm_reg_name vm_segment_name(int seg_encoding);
diff --git a/sys/riscv/include/vmm_dev.h b/sys/riscv/include/vmm_dev.h
index 4d30d5a1c35b..a60e545b8f52 100644
--- a/sys/riscv/include/vmm_dev.h
+++ b/sys/riscv/include/vmm_dev.h
@@ -38,6 +38,8 @@
 
 #include <machine/vmm.h>
 
+#include <dev/vmm/vmm_param.h>
+
 struct vm_memmap {
 	vm_paddr_t	gpa;
 	int		segid;		/* memory segment */
diff --git a/sys/riscv/riscv/elf_machdep.c b/sys/riscv/riscv/elf_machdep.c
index 67b1fcc4c1a9..5bd4af4c15f8 100644
--- a/sys/riscv/riscv/elf_machdep.c
+++ b/sys/riscv/riscv/elf_machdep.c
@@ -100,7 +100,7 @@ static struct sysentvec elf64_freebsd_sysvec = {
 };
 INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec);
 
-static Elf64_Brandinfo freebsd_brand_info = {
+static const Elf64_Brandinfo freebsd_brand_info = {
 	.brand		= ELFOSABI_FREEBSD,
 	.machine	= EM_RISCV,
 	.compat_3_brand	= "FreeBSD",
@@ -110,7 +110,7 @@ static Elf64_Brandinfo freebsd_brand_info = {
 	.brand_note	= &elf64_freebsd_brandnote,
 	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
 };
-SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+C_SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
 	(sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
 
 static void
diff --git a/sys/riscv/riscv/fpe.c b/sys/riscv/riscv/fpe.c
index b6c66e5e4f09..63103a794a8e 100644
--- a/sys/riscv/riscv/fpe.c
+++ b/sys/riscv/riscv/fpe.c
@@ -69,39 +69,39 @@ fpe_store(struct fpreg *regs)
 
 	__asm __volatile(
 	    "frcsr	%0	\n"
-	    "fsd	f0, (16 * 0)(%1)\n"
-	    "fsd	f1, (16 * 1)(%1)\n"
-	    "fsd	f2, (16 * 2)(%1)\n"
-	    "fsd	f3, (16 * 3)(%1)\n"
-	    "fsd	f4, (16 * 4)(%1)\n"
-	    "fsd	f5, (16 * 5)(%1)\n"
-	    "fsd	f6, (16 * 6)(%1)\n"
-	    "fsd	f7, (16 * 7)(%1)\n"
-	    "fsd	f8, (16 * 8)(%1)\n"
-	    "fsd	f9, (16 * 9)(%1)\n"
-	    "fsd	f10, (16 * 10)(%1)\n"
-	    "fsd	f11, (16 * 11)(%1)\n"
-	    "fsd	f12, (16 * 12)(%1)\n"
-	    "fsd	f13, (16 * 13)(%1)\n"
-	    "fsd	f14, (16 * 14)(%1)\n"
-	    "fsd	f15, (16 * 15)(%1)\n"
-	    "fsd	f16, (16 * 16)(%1)\n"
-	    "fsd	f17, (16 * 17)(%1)\n"
-	    "fsd	f18, (16 * 18)(%1)\n"
-	    "fsd	f19, (16 * 19)(%1)\n"
-	    "fsd	f20, (16 * 20)(%1)\n"
-	    "fsd	f21, (16 * 21)(%1)\n"
-	    "fsd	f22, (16 * 22)(%1)\n"
-	    "fsd	f23, (16 * 23)(%1)\n"
-	    "fsd	f24, (16 * 24)(%1)\n"
-	    "fsd	f25, (16 * 25)(%1)\n"
-	    "fsd	f26, (16 * 26)(%1)\n"
-	    "fsd	f27, (16 * 27)(%1)\n"
-	    "fsd	f28, (16 * 28)(%1)\n"
-	    "fsd	f29, (16 * 29)(%1)\n"
-	    "fsd	f30, (16 * 30)(%1)\n"
-	    "fsd	f31, (16 * 31)(%1)\n"
-	    : "=&r"(fcsr), "=r"(fp_x), "=m"(*fp_x));
+	    "fsd	f0, (16 * 0)(%2)\n"
+	    "fsd	f1, (16 * 1)(%2)\n"
+	    "fsd	f2, (16 * 2)(%2)\n"
+	    "fsd	f3, (16 * 3)(%2)\n"
+	    "fsd	f4, (16 * 4)(%2)\n"
+	    "fsd	f5, (16 * 5)(%2)\n"
+	    "fsd	f6, (16 * 6)(%2)\n"
+	    "fsd	f7, (16 * 7)(%2)\n"
+	    "fsd	f8, (16 * 8)(%2)\n"
+	    "fsd	f9, (16 * 9)(%2)\n"
+	    "fsd	f10, (16 * 10)(%2)\n"
+	    "fsd	f11, (16 * 11)(%2)\n"
+	    "fsd	f12, (16 * 12)(%2)\n"
+	    "fsd	f13, (16 * 13)(%2)\n"
+	    "fsd	f14, (16 * 14)(%2)\n"
+	    "fsd	f15, (16 * 15)(%2)\n"
+	    "fsd	f16, (16 * 16)(%2)\n"
+	    "fsd	f17, (16 * 17)(%2)\n"
+	    "fsd	f18, (16 * 18)(%2)\n"
+	    "fsd	f19, (16 * 19)(%2)\n"
+	    "fsd	f20, (16 * 20)(%2)\n"
+	    "fsd	f21, (16 * 21)(%2)\n"
+	    "fsd	f22, (16 * 22)(%2)\n"
+	    "fsd	f23, (16 * 23)(%2)\n"
+	    "fsd	f24, (16 * 24)(%2)\n"
+	    "fsd	f25, (16 * 25)(%2)\n"
+	    "fsd	f26, (16 * 26)(%2)\n"
+	    "fsd	f27, (16 * 27)(%2)\n"
+	    "fsd	f28, (16 * 28)(%2)\n"
+	    "fsd	f29, (16 * 29)(%2)\n"
+	    "fsd	f30, (16 * 30)(%2)\n"
+	    "fsd	f31, (16 * 31)(%2)\n"
+	    : "=&r"(fcsr), "=m"(*fp_x) : "r"(fp_x));
 
 	regs->fp_fcsr = fcsr;
 }
diff --git a/sys/riscv/starfive/jh7110_pcie.c b/sys/riscv/starfive/jh7110_pcie.c
index 2d0a4be69b2c..5181252ab2dc 100644
--- a/sys/riscv/starfive/jh7110_pcie.c
+++ b/sys/riscv/starfive/jh7110_pcie.c
@@ -483,6 +483,16 @@ jh7110_pcie_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
 }
 
 static void
+jh7110_pcie_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
+jh7110_pcie_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
 jh7110_pcie_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
 {
 	struct jh7110_pcie_softc *sc;
@@ -1008,6 +1018,8 @@ static device_method_t jh7110_pcie_methods[] = {
 	/* Interrupt controller interface */
 	DEVMETHOD(pic_enable_intr,	jh7110_pcie_msi_enable_intr),
 	DEVMETHOD(pic_disable_intr,	jh7110_pcie_msi_disable_intr),
+	DEVMETHOD(pic_post_filter,	jh7110_pcie_msi_post_filter),
+	DEVMETHOD(pic_post_ithread,	jh7110_pcie_msi_post_ithread),
 	DEVMETHOD(pic_pre_ithread,	jh7110_pcie_msi_pre_ithread),
 
 	/* OFW bus interface */
diff --git a/sys/riscv/vmm/riscv.h b/sys/riscv/vmm/riscv.h
index 870d0d6c5cd1..917a333520ed 100644
--- a/sys/riscv/vmm/riscv.h
+++ b/sys/riscv/vmm/riscv.h
@@ -122,29 +122,6 @@ struct hyptrap {
 	uint64_t htinst;
 };
 
-#define	DEFINE_VMMOPS_IFUNC(ret_type, opname, args)			\
-	ret_type vmmops_##opname args;
-
-DEFINE_VMMOPS_IFUNC(int, modinit, (void))
-DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
-DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
-DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
-    uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
-DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
-    struct vm_eventinfo *info))
-DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
-DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
-    int vcpu_id))
-DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
-DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t scause))
-DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
-DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
-DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
-DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
-    vm_offset_t max))
-DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
-
 #define	dprintf(fmt, ...)
 
 struct hypctx *riscv_get_active_vcpu(void);
diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c
index 7528ef6e4698..23b57ad3b7aa 100644
--- a/sys/riscv/vmm/vmm.c
+++ b/sys/riscv/vmm/vmm.c
@@ -38,7 +38,6 @@
 #include <sys/linker.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>
-#include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/pcpu.h>
 #include <sys/proc.h>
@@ -92,7 +91,6 @@ struct vcpu {
 	struct fpreg *guestfpu;	/* (a,i) guest fpu state */
 };
 
-#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
 #define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
 #define	vcpu_lock_destroy(v)	mtx_destroy(&((v)->mtx))
 #define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
@@ -121,9 +119,8 @@ struct vm {
 	bool		dying;			/* (o) is dying */
 	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
 	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
-	struct vmspace	*vmspace;		/* (o) guest's address space */
 	struct vm_mem	mem;			/* (i) [m+v] guest memory */
-	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
+	char		name[VM_MAX_NAMELEN + 1]; /* (o) virtual machine name */
 	struct vcpu	**vcpu;			/* (i) guest vcpus */
 	struct vmm_mmio_region mmio_region[VM_MAX_MMIO_REGIONS];
 						/* (o) guest MMIO regions */
@@ -135,8 +132,6 @@ struct vm {
 	struct sx	vcpus_init_lock;	/* (o) */
 };
 
-static bool vmm_initialized = false;
-
 static MALLOC_DEFINE(M_VMM, "vmm", "vmm");
 
 /* statistics */
@@ -148,10 +143,6 @@ static int vmm_ipinum;
 SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
     "IPI vector used for vcpu notifications");
 
-u_int vm_maxcpu;
-SYSCTL_UINT(_hw_vmm, OID_AUTO, maxcpu, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
-    &vm_maxcpu, 0, "Maximum number of vCPUs");
-
 static void vcpu_notify_event_locked(struct vcpu *vcpu);
 
 /* global statistics */
@@ -159,12 +150,6 @@ VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
 VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq");
 VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception");
 
-/*
- * Upper limit on vm_maxcpu. We could increase this to 28 bits, but this
- * is a safe value for now.
- */
-#define	VM_MAXCPU	MIN(0xffff - 1, CPU_SETSIZE)
-
 static void
 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
 {
@@ -174,6 +159,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy)
 		vmm_stat_free(vcpu->stats);
 		fpu_save_area_free(vcpu->guestfpu);
 		vcpu_lock_destroy(vcpu);
+		free(vcpu, M_VMM);
 	}
 }
 
@@ -211,81 +197,24 @@ vm_exitinfo(struct vcpu *vcpu)
 	return (&vcpu->exitinfo);
 }
 
-static int
-vmm_init(void)
+int
+vmm_modinit(void)
 {
-
-	vm_maxcpu = mp_ncpus;
-
-	TUNABLE_INT_FETCH("hw.vmm.maxcpu", &vm_maxcpu);
-
-	if (vm_maxcpu > VM_MAXCPU) {
-		printf("vmm: vm_maxcpu clamped to %u\n", VM_MAXCPU);
-		vm_maxcpu = VM_MAXCPU;
-	}
-
-	if (vm_maxcpu == 0)
-		vm_maxcpu = 1;
-
 	return (vmmops_modinit());
 }
 
-static int
-vmm_handler(module_t mod, int what, void *arg)
+int
+vmm_modcleanup(void)
 {
-	int error;
-
-	switch (what) {
-	case MOD_LOAD:
-		error = vmmdev_init();
-		if (error != 0)
-			break;
-		error = vmm_init();
-		if (error == 0)
-			vmm_initialized = true;
-		else
-			(void)vmmdev_cleanup();
-		break;
-	case MOD_UNLOAD:
-		error = vmmdev_cleanup();
-		if (error == 0 && vmm_initialized) {
-			error = vmmops_modcleanup();
-			if (error) {
-				/*
-				 * Something bad happened - prevent new
-				 * VMs from being created
-				 */
-				vmm_initialized = false;
-			}
-		}
-		break;
-	default:
-		error = 0;
-		break;
-	}
-	return (error);
+	return (vmmops_modcleanup());
 }
 
-static moduledata_t vmm_kmod = {
-	"vmm",
-	vmm_handler,
-	NULL
-};
-
-/*
- * vmm initialization has the following dependencies:
- *
- * - vmm device initialization requires an initialized devfs.
- */
-DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_DEVFS + 1, SI_ORDER_ANY);
-MODULE_VERSION(vmm, 1);
-
 static void
 vm_init(struct vm *vm, bool create)
 {
 	int i;
 
-	vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
+	vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
 	MPASS(vm->cookie != NULL);
 
 	CPU_ZERO(&vm->active_cpus);
@@ -320,10 +249,6 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
 	if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
 		return (NULL);
 
-	/* Some interrupt controllers may have a CPU limit */
-	if (vcpuid >= aplic_max_cpu_count(vm->cookie))
-		return (NULL);
-
 	vcpu = (struct vcpu *)
 	    atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
 	if (__predict_true(vcpu != NULL))
@@ -347,9 +272,9 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid)
 }
 
 void
-vm_slock_vcpus(struct vm *vm)
+vm_lock_vcpus(struct vm *vm)
 {
-	sx_slock(&vm->vcpus_init_lock);
+	sx_xlock(&vm->vcpus_init_lock);
 }
 
 void
@@ -362,26 +287,15 @@ int
 vm_create(const char *name, struct vm **retvm)
 {
 	struct vm *vm;
-	struct vmspace *vmspace;
-
-	/*
-	 * If vmm.ko could not be successfully initialized then don't attempt
-	 * to create the virtual machine.
-	 */
-	if (!vmm_initialized)
-		return (ENXIO);
-
-	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
-		return (EINVAL);
-
-	vmspace = vmmops_vmspace_alloc(0, 1ul << 39);
-	if (vmspace == NULL)
-		return (ENOMEM);
+	int error;
 
 	vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
+	error = vm_mem_init(&vm->mem, 0, 1ul << 39);
+	if (error != 0) {
+		free(vm, M_VMM);
+		return (error);
+	}
 	strcpy(vm->name, name);
-	vm->vmspace = vmspace;
-	vm_mem_init(&vm->mem);
 	sx_init(&vm->vcpus_init_lock, "vm vcpus");
 
 	vm->sockets = 1;
@@ -450,11 +364,6 @@ vm_cleanup(struct vm *vm, bool destroy)
 	if (destroy) {
 		vm_mem_destroy(vm);
 
-		vmmops_vmspace_free(vm->vmspace);
-		vm->vmspace = NULL;
-
-		for (i = 0; i < vm->maxcpus; i++)
-			free(vm->vcpu[i], M_VMM);
 		free(vm->vcpu, M_VMM);
 		sx_destroy(&vm->vcpus_init_lock);
 	}
@@ -760,12 +669,6 @@ vcpu_notify_event(struct vcpu *vcpu)
 	vcpu_unlock(vcpu);
 }
 
-struct vmspace *
-vm_vmspace(struct vm *vm)
-{
-	return (vm->vmspace);
-}
-
 struct vm_mem *
 vm_mem(struct vm *vm)
 {
@@ -967,8 +870,7 @@ vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
 int
 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
 {
-
-	if (reg >= VM_REG_LAST)
+	if (reg < 0 || reg >= VM_REG_LAST)
 		return (EINVAL);
 
 	return (vmmops_getreg(vcpu->cookie, reg, retval));
@@ -979,7 +881,7 @@ vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
 {
 	int error;
 
-	if (reg >= VM_REG_LAST)
+	if (reg < 0 || reg >= VM_REG_LAST)
 		return (EINVAL);
 	error = vmmops_setreg(vcpu->cookie, reg, val);
 	if (error || reg != VM_REG_GUEST_SEPC)
@@ -1036,10 +938,14 @@ vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
 static int
 vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
 {
+	struct vm *vm;
+	vm = vcpu->vm;
 	vcpu_lock(vcpu);
-
 	while (1) {
+		if (vm->suspend)
+			break;
+
 		if (aplic_check_pending(vcpu->cookie))
 			break;
@@ -1080,7 +986,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 
 	vm = vcpu->vm;
 	vme = &vcpu->exitinfo;
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	addr = (vme->htval << 2) & ~(PAGE_SIZE - 1);
 
 	dprintf("%s: %lx\n", __func__, addr);
@@ -1103,7 +1009,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu)
 	if (pmap_fault(pmap, addr, ftype))
 		return (0);
 
-	map = &vm->vmspace->vm_map;
+	map = &vm_vmspace(vm)->vm_map;
 	rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL);
 	if (rv != KERN_SUCCESS) {
 		printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n",
@@ -1185,7 +1091,7 @@ vm_run(struct vcpu *vcpu)
 	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
 		return (EINVAL);
 
-	pmap = vmspace_pmap(vm->vmspace);
+	pmap = vmspace_pmap(vm_vmspace(vm));
 	vme = &vcpu->exitinfo;
 	evinfo.rptr = NULL;
 	evinfo.sptr = &vm->suspend;
diff --git a/sys/riscv/vmm/vmm_aplic.c b/sys/riscv/vmm/vmm_aplic.c
index 4df41f2de1a5..74cb4fef4273 100644
--- a/sys/riscv/vmm/vmm_aplic.c
+++ b/sys/riscv/vmm/vmm_aplic.c
@@ -577,13 +577,3 @@ aplic_sync_hwstate(struct hypctx *hypctx)
 {
 
 }
-
-int
-aplic_max_cpu_count(struct hyp *hyp)
-{
-	int16_t max_count;
-
-	max_count = vm_get_maxcpus(hyp->vm);
-
-	return (max_count);
-}
diff --git a/sys/riscv/vmm/vmm_aplic.h b/sys/riscv/vmm/vmm_aplic.h
index 49510221b419..96018fe9adda 100644
--- a/sys/riscv/vmm/vmm_aplic.h
+++ b/sys/riscv/vmm/vmm_aplic.h
@@ -49,6 +49,5 @@ void aplic_cpuinit(struct hypctx *hypctx);
 void aplic_cpucleanup(struct hypctx *hypctx);
 void aplic_flush_hwstate(struct hypctx *hypctx);
 void aplic_sync_hwstate(struct hypctx *hypctx);
-int aplic_max_cpu_count(struct hyp *hyp);
 
 #endif /* !_VMM_APLIC_H_ */
diff --git a/sys/riscv/vmm/vmm_dev_machdep.c b/sys/riscv/vmm/vmm_dev_machdep.c
index ba15d8dcd79e..c736b10dc9c0 100644
--- a/sys/riscv/vmm/vmm_dev_machdep.c
+++ b/sys/riscv/vmm/vmm_dev_machdep.c
@@ -67,18 +67,13 @@ int
 vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
     int fflag, struct thread *td)
 {
-	struct vm_run *vmrun;
-	struct vm_aplic_descr *aplic;
-	struct vm_irq *vi;
-	struct vm_exception *vmexc;
-	struct vm_gla2gpa *gg;
-	struct vm_msi *vmsi;
 	int error;
 
 	error = 0;
 	switch (cmd) {
 	case VM_RUN: {
 		struct vm_exit *vme;
+		struct vm_run *vmrun;
 
 		vmrun = (struct vm_run *)data;
 		vme = vm_exitinfo(vcpu);
@@ -90,34 +85,52 @@ vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd, caddr_t data,
 		error = copyout(vme, vmrun->vm_exit, sizeof(*vme));
 		break;
 	}
-	case VM_INJECT_EXCEPTION:
+	case VM_INJECT_EXCEPTION: {
+		struct vm_exception *vmexc;
+
 		vmexc = (struct vm_exception *)data;
 		error = vm_inject_exception(vcpu, vmexc->scause);
 		break;
-	case VM_GLA2GPA_NOFAULT:
+	}
+	case VM_GLA2GPA_NOFAULT: {
+		struct vm_gla2gpa *gg;
+
 		gg = (struct vm_gla2gpa *)data;
 		error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
 		    gg->prot, &gg->gpa, &gg->fault);
 		KASSERT(error == 0 || error == EFAULT,
 		    ("%s: vm_gla2gpa unknown error %d", __func__, error));
 		break;
-	case VM_ATTACH_APLIC:
+	}
+	case VM_ATTACH_APLIC: {
+		struct vm_aplic_descr *aplic;
+
 		aplic = (struct vm_aplic_descr *)data;
 		error = vm_attach_aplic(vm, aplic);
 		break;
-	case VM_RAISE_MSI:
+	}
+	case VM_RAISE_MSI: {
+		struct vm_msi *vmsi;
+
 		vmsi = (struct vm_msi *)data;
 		error = vm_raise_msi(vm, vmsi->msg, vmsi->addr, vmsi->bus,
 		    vmsi->slot, vmsi->func);
 		break;
-	case VM_ASSERT_IRQ:
+	}
+	case VM_ASSERT_IRQ: {
+		struct vm_irq *vi;
+
 		vi = (struct vm_irq *)data;
 		error = vm_assert_irq(vm, vi->irq);
 		break;
-	case VM_DEASSERT_IRQ:
+	}
+	case VM_DEASSERT_IRQ: {
+		struct vm_irq *vi;
+
 		vi = (struct vm_irq *)data;
 		error = vm_deassert_irq(vm, vi->irq);
 		break;
+	}
 	default:
 		error = ENOTTY;
 		break;
