diff options
Diffstat (limited to 'sys/riscv')
31 files changed, 2070 insertions, 126 deletions
diff --git a/sys/riscv/allwinner/files.allwinner b/sys/riscv/allwinner/files.allwinner index 423a89c10c78..7a4ff6b9c62e 100644 --- a/sys/riscv/allwinner/files.allwinner +++ b/sys/riscv/allwinner/files.allwinner @@ -1,5 +1,7 @@ arm/allwinner/aw_gpio.c optional gpio aw_gpio fdt +arm/allwinner/aw_mmc.c optional mmc aw_mmc fdt | mmccam aw_mmc fdt +arm/allwinner/aw_rtc.c optional aw_rtc fdt arm/allwinner/aw_syscon.c optional syscon arm/allwinner/aw_sid.c optional aw_sid nvmem arm/allwinner/aw_timer.c optional aw_timer fdt diff --git a/sys/riscv/conf/GENERIC b/sys/riscv/conf/GENERIC index 7d7d0ca6e79c..a8500fe80019 100644 --- a/sys/riscv/conf/GENERIC +++ b/sys/riscv/conf/GENERIC @@ -206,6 +206,7 @@ device iicoc # OpenCores I2C controller support # Include SoC specific configuration include "std.allwinner" +include "std.cvitek" include "std.eswin" include "std.sifive" include "std.starfive" diff --git a/sys/riscv/conf/std.allwinner b/sys/riscv/conf/std.allwinner index 1bf6b027a4cb..34fe195b01ba 100644 --- a/sys/riscv/conf/std.allwinner +++ b/sys/riscv/conf/std.allwinner @@ -7,6 +7,8 @@ options SOC_ALLWINNER_D1 device aw_ccu # Allwinner clock controller device aw_gpio # Allwinner GPIO controller +device aw_mmc # Allwinner SD/MMC controller +device aw_rtc # Allwinner Real-time Clock device aw_sid # Allwinner Secure ID EFUSE device aw_timer # Allwinner Timer device aw_usbphy # Allwinner USB PHY diff --git a/sys/riscv/conf/std.cvitek b/sys/riscv/conf/std.cvitek new file mode 100644 index 000000000000..8eb146282462 --- /dev/null +++ b/sys/riscv/conf/std.cvitek @@ -0,0 +1,14 @@ +# +# CVITEK SoC support +# + +device fdt +device dwc +device dwgpio +device uart_snps +device dwc_cvitek +device sdhci_cvitek +device cvitek_reset +device cvitek_restart + +files "../cvitek/files.cvitek" diff --git a/sys/riscv/cvitek/cvitek_reset.c b/sys/riscv/cvitek/cvitek_reset.c new file mode 100644 index 000000000000..348bbc85c9a9 --- /dev/null +++ b/sys/riscv/cvitek/cvitek_reset.c @@ -0,0 +1,132 @@ 
+/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 Bojan Novković <bnovkov@FreeBSD.org> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/kernel.h> +#include <sys/lock.h> +#include <sys/module.h> +#include <sys/mutex.h> +#include <sys/rman.h> + +#include <machine/bus.h> +#include <machine/cpu.h> +#include <machine/bus.h> +#include <machine/resource.h> + +#include <dev/hwreset/hwreset.h> +#include <dev/syscon/syscon.h> +#include <dev/ofw/ofw_bus.h> +#include <dev/ofw/ofw_bus_subr.h> +#include <dev/ofw/openfirm.h> + +#include "syscon_if.h" +#include "hwreset_if.h" + +struct cvitek_reset_softc { + device_t dev; + struct mtx mtx; + struct syscon *syscon; +}; + +static int +cvitek_reset_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_is_compatible(dev, "cvitek,reset")) { + device_set_desc(dev, "CVITEK reset controller"); + return (BUS_PROBE_DEFAULT); + } + + return (ENXIO); +} + +static int +cvitek_reset_attach(device_t dev) +{ + struct cvitek_reset_softc *sc; + int error; + + sc = device_get_softc(dev); + sc->dev = dev; + + error = syscon_get_by_ofw_property(dev, ofw_bus_get_node(dev), + "syscon", &sc->syscon); + if (error != 0) { + device_printf(dev, "Couldn't get syscon handle\n"); + return (error); + } + mtx_init(&sc->mtx, device_get_nameunit(sc->dev), NULL, MTX_DEF); + + hwreset_register_ofw_provider(dev); + + return (0); +} + +static int +cvitek_reset_assert(device_t dev, intptr_t id, bool reset) +{ + struct cvitek_reset_softc *sc; + uint32_t offset, val; + uint32_t bit; + + sc = device_get_softc(dev); + bit = id % 32; + offset = id / 32; + + mtx_lock(&sc->mtx); + val = SYSCON_READ_4(sc->syscon, offset); + if (reset) + val &= ~(1 << bit); + else + val |= (1 << bit); + SYSCON_WRITE_4(sc->syscon, offset, val); + mtx_unlock(&sc->mtx); + + return (0); +} + +static device_method_t cvitek_reset_methods[] = { + DEVMETHOD(device_probe, cvitek_reset_probe), + DEVMETHOD(device_attach, cvitek_reset_attach), + + DEVMETHOD(hwreset_assert, cvitek_reset_assert), + + DEVMETHOD_END 
+}; + +static driver_t cvitek_reset_driver = { + "cvitek_reset", + cvitek_reset_methods, + sizeof(struct cvitek_reset_softc) +}; + +DRIVER_MODULE(cvitek_reset, simplebus, cvitek_reset_driver, 0, 0); diff --git a/sys/riscv/cvitek/cvitek_restart.c b/sys/riscv/cvitek/cvitek_restart.c new file mode 100644 index 000000000000..53df126d5171 --- /dev/null +++ b/sys/riscv/cvitek/cvitek_restart.c @@ -0,0 +1,158 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2024 Bojan Novković <bnovkov@FreeBSD.org> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Driver for cvitek's poweroff/restart controller. 
+ */ + +#include <sys/param.h> +#include <sys/bus.h> +#include <sys/types.h> +#include <sys/eventhandler.h> +#include <sys/kernel.h> +#include <sys/module.h> +#include <sys/reboot.h> +#include <sys/rman.h> + +#include <machine/bus.h> +#include <machine/resource.h> + +#include <dev/ofw/ofw_bus.h> +#include <dev/ofw/ofw_bus_subr.h> +#include <dev/ofw/openfirm.h> + +#define RTC_CTRL0_UNLOCK 0x4 +#define RTC_CTRL0_UNLOCK_KEY 0xAB18 + +#define RTC_CTRL0 0x8 +#define RTC_CTRL0_RESERVED_MASK 0xFFFF0800 +#define RTC_CTRL0_REQ_SHUTDOWN 0x01 +#define RTC_CTRL0_REQ_POWERCYCLE 0x08 +#define RTC_CTRL0_REQ_WARM_RESET 0x10 + +#define RTC_BASE_OFFSET 0x1000 +#define RTC_EN_SHDN_REQ (RTC_BASE_OFFSET + 0xC0) +#define RTC_EN_PWR_CYC_REQ (RTC_BASE_OFFSET + 0xC8) +#define RTC_EN_WARM_RST_REQ (RTC_BASE_OFFSET + 0xCC) + +struct cvitek_restart_softc { + int reg_rid; + struct resource *reg; + eventhandler_tag tag; +}; + +static void +cvitek_restart_shutdown_final(device_t dev, int howto) +{ + struct cvitek_restart_softc *sc; + uint32_t val; + + sc = device_get_softc(dev); + val = RTC_CTRL0_RESERVED_MASK; + if ((howto & RB_POWEROFF) != 0) + val |= RTC_CTRL0_REQ_SHUTDOWN; + else if ((howto & RB_POWERCYCLE) != 0) + val |= RTC_CTRL0_REQ_POWERCYCLE; + else + val |= RTC_CTRL0_REQ_WARM_RESET; + + /* Unlock writes to 'rtc_ctrl0'. 
*/ + bus_write_4(sc->reg, RTC_CTRL0_UNLOCK, RTC_CTRL0_UNLOCK_KEY); + bus_write_4(sc->reg, RTC_CTRL0, val); + DELAY(1000); + + device_printf(dev, "Poweroff request failed\n"); +} + +static int +cvitek_restart_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_is_compatible(dev, "cvitek,restart")) { + device_set_desc(dev, "Cvitek restart controller"); + return (BUS_PROBE_DEFAULT); + } + return (ENXIO); +} + +static int +cvitek_restart_attach(device_t dev) +{ + struct cvitek_restart_softc *sc; + + sc = device_get_softc(dev); + sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->reg_rid, + RF_ACTIVE); + if (sc->reg == NULL) { + device_printf(dev, "can't map RTC regs\n"); + return (ENXIO); + } + + /* Enable requests for various poweroff methods. */ + bus_write_4(sc->reg, RTC_EN_SHDN_REQ, 0x1); + bus_write_4(sc->reg, RTC_EN_PWR_CYC_REQ, 0x1); + bus_write_4(sc->reg, RTC_EN_WARM_RST_REQ, 0x1); + + sc->tag = EVENTHANDLER_REGISTER(shutdown_final, + cvitek_restart_shutdown_final, dev, SHUTDOWN_PRI_LAST); + + return (0); +} + +static int +cvitek_restart_detach(device_t dev) +{ + struct cvitek_restart_softc *sc; + + sc = device_get_softc(dev); + if (sc->reg == NULL) + return (0); + + bus_write_4(sc->reg, RTC_EN_SHDN_REQ, 0x0); + bus_write_4(sc->reg, RTC_EN_PWR_CYC_REQ, 0x0); + bus_write_4(sc->reg, RTC_EN_WARM_RST_REQ, 0x0); + + bus_release_resource(dev, SYS_RES_MEMORY, sc->reg_rid, sc->reg); + EVENTHANDLER_DEREGISTER(shutdown_final, sc->tag); + + return (0); +} + +static device_method_t cvitek_restart_methods[] = { + DEVMETHOD(device_probe, cvitek_restart_probe), + DEVMETHOD(device_attach, cvitek_restart_attach), + DEVMETHOD(device_detach, cvitek_restart_detach), + + DEVMETHOD_END +}; + +DEFINE_CLASS_0(cvitek_restart, cvitek_restart_driver, cvitek_restart_methods, + sizeof(struct cvitek_restart_softc)); +DRIVER_MODULE(cvitek_restart, simplebus, cvitek_restart_driver, NULL, NULL); diff --git a/sys/riscv/cvitek/files.cvitek 
b/sys/riscv/cvitek/files.cvitek new file mode 100644 index 000000000000..bf89ee390403 --- /dev/null +++ b/sys/riscv/cvitek/files.cvitek @@ -0,0 +1,4 @@ +riscv/cvitek/cvitek_restart.c optional fdt cvitek_restart +riscv/cvitek/cvitek_reset.c optional fdt cvitek_reset +dev/dwc/if_dwc_cvitek.c optional fdt dwc_cvitek +dev/sdhci/sdhci_fdt_cvitek.c optional fdt sdhci sdhci_cvitek regulator diff --git a/sys/riscv/include/atomic.h b/sys/riscv/include/atomic.h index 74ffc171b028..c90cb02c482c 100644 --- a/sys/riscv/include/atomic.h +++ b/sys/riscv/include/atomic.h @@ -656,4 +656,7 @@ atomic_thread_fence_seq_cst(void) #include <sys/_atomic_subword.h> +#define atomic_set_short atomic_set_16 +#define atomic_clear_short atomic_clear_16 + #endif /* _MACHINE_ATOMIC_H_ */ diff --git a/sys/riscv/include/runq.h b/sys/riscv/include/cbo.h index 3a7de010cd72..cdf10069ba66 100644 --- a/sys/riscv/include/runq.h +++ b/sys/riscv/include/cbo.h @@ -1,6 +1,7 @@ /*- - * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org> - * All rights reserved. + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,21 +25,9 @@ * SUCH DAMAGE. */ -#ifndef _MACHINE_RUNQ_H_ -#define _MACHINE_RUNQ_H_ - -#define RQB_LEN (1) /* Number of priority status words. */ -#define RQB_L2BPW (6) /* Log2(sizeof(rqb_word_t) * NBBY)). */ -#define RQB_BPW (1<<RQB_L2BPW) /* Bits in an rqb_word_t. */ - -#define RQB_BIT(pri) (1ul << ((pri) & (RQB_BPW - 1))) -#define RQB_WORD(pri) ((pri) >> RQB_L2BPW) +#ifndef _RISCV_CBO_H_ +#define _RISCV_CBO_H_ -#define RQB_FFS(word) (ffsl(word) - 1) - -/* - * Type of run queue status word. 
- */ -typedef unsigned long rqb_word_t; +void cbo_zicbom_setup_cache(int cbom_block_size); -#endif +#endif /* _RISCV_CBO_H_ */ diff --git a/sys/riscv/include/cpu.h b/sys/riscv/include/cpu.h index a204b21a4a74..13a9f1ca0603 100644 --- a/sys/riscv/include/cpu.h +++ b/sys/riscv/include/cpu.h @@ -80,6 +80,7 @@ /* SiFive marchid values */ #define MARCHID_SIFIVE_U7 MARCHID_COMMERCIAL(7) +#define MARCHID_SIFIVE_P5 MARCHID_COMMERCIAL(8) /* * MMU virtual-addressing modes. Support for each level implies the previous, diff --git a/sys/riscv/include/ieeefp.h b/sys/riscv/include/ieeefp.h index 03a96e8a000f..84b554a04c65 100644 --- a/sys/riscv/include/ieeefp.h +++ b/sys/riscv/include/ieeefp.h @@ -5,4 +5,9 @@ /* TODO */ typedef int fp_except_t; +__BEGIN_DECLS +extern fp_except_t fpgetmask(void); +extern fp_except_t fpsetmask(fp_except_t); +__END_DECLS + #endif /* _MACHINE_IEEEFP_H_ */ diff --git a/sys/riscv/include/metadata.h b/sys/riscv/include/metadata.h index ddbad3fae3b4..7f7b763577e3 100644 --- a/sys/riscv/include/metadata.h +++ b/sys/riscv/include/metadata.h @@ -33,10 +33,15 @@ #define MODINFOMD_EFI_FB 0x1003 #define MODINFOMD_BOOT_HARTID 0x1004 +/* + * This is not the same as the UEFI standard EFI_MEMORY_ATTRIBUTES_TABLE, though + * memory_size / descriptor_size entries of EFI_MEMORY_DESCRIPTORS follow this table + * starting at a 16-byte alignment. 
+ */ struct efi_map_header { - size_t memory_size; - size_t descriptor_size; - uint32_t descriptor_version; + size_t memory_size; /* Number of bytes that follow */ + size_t descriptor_size; /* Size of each EFI_MEMORY_DESCRIPTOR */ + uint32_t descriptor_version; /* Currently '1' */ }; /* diff --git a/sys/riscv/include/vmm.h b/sys/riscv/include/vmm.h index 1221521be368..e227dd825966 100644 --- a/sys/riscv/include/vmm.h +++ b/sys/riscv/include/vmm.h @@ -49,6 +49,7 @@ enum vm_suspend_how { VM_SUSPEND_RESET, VM_SUSPEND_POWEROFF, VM_SUSPEND_HALT, + VM_SUSPEND_DESTROY, VM_SUSPEND_LAST }; @@ -122,10 +123,33 @@ struct vm_eventinfo { int *iptr; /* reqidle cookie */ }; +#define DECLARE_VMMOPS_FUNC(ret_type, opname, args) \ + ret_type vmmops_##opname args + +DECLARE_VMMOPS_FUNC(int, modinit, (void)); +DECLARE_VMMOPS_FUNC(int, modcleanup, (void)); +DECLARE_VMMOPS_FUNC(void *, init, (struct vm *vm, struct pmap *pmap)); +DECLARE_VMMOPS_FUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging, + uint64_t gla, int prot, uint64_t *gpa, int *is_fault)); +DECLARE_VMMOPS_FUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap, + struct vm_eventinfo *info)); +DECLARE_VMMOPS_FUNC(void, cleanup, (void *vmi)); +DECLARE_VMMOPS_FUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu, + int vcpu_id)); +DECLARE_VMMOPS_FUNC(void, vcpu_cleanup, (void *vcpui)); +DECLARE_VMMOPS_FUNC(int, exception, (void *vcpui, uint64_t scause)); +DECLARE_VMMOPS_FUNC(int, getreg, (void *vcpui, int num, uint64_t *retval)); +DECLARE_VMMOPS_FUNC(int, setreg, (void *vcpui, int num, uint64_t val)); +DECLARE_VMMOPS_FUNC(int, getcap, (void *vcpui, int num, int *retval)); +DECLARE_VMMOPS_FUNC(int, setcap, (void *vcpui, int num, int val)); +DECLARE_VMMOPS_FUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min, + vm_offset_t max)); +DECLARE_VMMOPS_FUNC(void, vmspace_free, (struct vmspace *vmspace)); + int vm_create(const char *name, struct vm **retvm); struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid); void 
vm_disable_vcpu_creation(struct vm *vm); -void vm_slock_vcpus(struct vm *vm); +void vm_lock_vcpus(struct vm *vm); void vm_unlock_vcpus(struct vm *vm); void vm_destroy(struct vm *vm); int vm_reinit(struct vm *vm); @@ -211,7 +235,6 @@ vcpu_should_yield(struct vcpu *vcpu) void *vcpu_stats(struct vcpu *vcpu); void vcpu_notify_event(struct vcpu *vcpu); -struct vmspace *vm_vmspace(struct vm *vm); struct vm_mem *vm_mem(struct vm *vm); enum vm_reg_name vm_segment_name(int seg_encoding); diff --git a/sys/riscv/include/vmm_dev.h b/sys/riscv/include/vmm_dev.h index 856ff0778b95..4d30d5a1c35b 100644 --- a/sys/riscv/include/vmm_dev.h +++ b/sys/riscv/include/vmm_dev.h @@ -34,6 +34,8 @@ #ifndef _VMM_DEV_H_ #define _VMM_DEV_H_ +#include <sys/domainset.h> + #include <machine/vmm.h> struct vm_memmap { @@ -56,6 +58,9 @@ struct vm_memseg { int segid; size_t len; char name[VM_MAX_SUFFIXLEN + 1]; + domainset_t *ds_mask; + size_t ds_mask_size; + int ds_policy; }; struct vm_register { diff --git a/sys/riscv/riscv/busdma_bounce.c b/sys/riscv/riscv/busdma_bounce.c index f652f08bf5dc..9d9556fc72f9 100644 --- a/sys/riscv/riscv/busdma_bounce.c +++ b/sys/riscv/riscv/busdma_bounce.c @@ -672,6 +672,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, map->pagesneeded != 0 && addr_needs_bounce(dmat, curaddr)) { sgsize = roundup2(sgsize, dmat->common.alignment); + sgsize = MIN(sgsize, buflen); curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else if ((dmat->bounce_flags & BF_COHERENT) == 0) { diff --git a/sys/riscv/riscv/cbo.c b/sys/riscv/riscv/cbo.c new file mode 100644 index 000000000000..9b8891c514af --- /dev/null +++ b/sys/riscv/riscv/cbo.c @@ -0,0 +1,104 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* Cache Block Operations. */ + +#include <sys/param.h> +#include <sys/systm.h> + +#include <machine/cbo.h> + +static void +cbo_zicbom_cpu_dcache_wbinv_range(vm_offset_t va, vm_size_t len) +{ + vm_offset_t addr; + + /* + * A flush operation atomically performs a clean operation followed by + * an invalidate operation. 
+ */ + + va &= ~(dcache_line_size - 1); + for (addr = va; addr < va + len; addr += dcache_line_size) + __asm __volatile(".option push; .option arch, +zicbom\n" + "cbo.flush (%0); .option pop\n" :: "r"(addr)); +} + +static void +cbo_zicbom_cpu_dcache_inv_range(vm_offset_t va, vm_size_t len) +{ + vm_offset_t addr; + + /* + * An invalidate operation makes data from store operations performed by + * a set of non-coherent agents visible to the set of coherent agents at + * a point common to both sets by deallocating all copies of a cache + * block from the set of coherent caches up to that point. + */ + + va &= ~(dcache_line_size - 1); + for (addr = va; addr < va + len; addr += dcache_line_size) + __asm __volatile(".option push; .option arch, +zicbom\n" + "cbo.inval (%0); .option pop\n" :: "r"(addr)); +} + +static void +cbo_zicbom_cpu_dcache_wb_range(vm_offset_t va, vm_size_t len) +{ + vm_offset_t addr; + + /* + * A clean operation makes data from store operations performed by the + * set of coherent agents visible to a set of non-coherent agents at a + * point common to both sets by performing a write transfer of a copy of + * a cache block to that point provided a coherent agent performed a + * store operation that modified the data in the cache block since the + * previous invalidate, clean, or flush operation on the cache block. 
+ */ + + va &= ~(dcache_line_size - 1); + for (addr = va; addr < va + len; addr += dcache_line_size) + __asm __volatile(".option push; .option arch, +zicbom\n" + "cbo.clean (%0); .option pop\n" :: "r"(addr)); +} + +void +cbo_zicbom_setup_cache(int cbom_block_size) +{ + struct riscv_cache_ops zicbom_ops; + + if (cbom_block_size <= 0 || !powerof2(cbom_block_size)) { + printf("Zicbom: could not initialise (invalid cache line %d)\n", + cbom_block_size); + return; + } + + zicbom_ops.dcache_wbinv_range = cbo_zicbom_cpu_dcache_wbinv_range; + zicbom_ops.dcache_inv_range = cbo_zicbom_cpu_dcache_inv_range; + zicbom_ops.dcache_wb_range = cbo_zicbom_cpu_dcache_wb_range; + riscv_cache_install_hooks(&zicbom_ops, cbom_block_size); +} diff --git a/sys/riscv/riscv/elf_machdep.c b/sys/riscv/riscv/elf_machdep.c index 67b1fcc4c1a9..5bd4af4c15f8 100644 --- a/sys/riscv/riscv/elf_machdep.c +++ b/sys/riscv/riscv/elf_machdep.c @@ -100,7 +100,7 @@ static struct sysentvec elf64_freebsd_sysvec = { }; INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec); -static Elf64_Brandinfo freebsd_brand_info = { +static const Elf64_Brandinfo freebsd_brand_info = { .brand = ELFOSABI_FREEBSD, .machine = EM_RISCV, .compat_3_brand = "FreeBSD", @@ -110,7 +110,7 @@ static Elf64_Brandinfo freebsd_brand_info = { .brand_note = &elf64_freebsd_brandnote, .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE }; -SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST, +C_SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info); static void diff --git a/sys/riscv/riscv/identcpu.c b/sys/riscv/riscv/identcpu.c index 54e008122eab..af71cc2f89f0 100644 --- a/sys/riscv/riscv/identcpu.c +++ b/sys/riscv/riscv/identcpu.c @@ -53,6 +53,7 @@ #include <machine/elf.h> #include <machine/md_var.h> #include <machine/thead.h> +#include <machine/cbo.h> #ifdef FDT #include <dev/fdt/fdt_common.h> @@ -78,6 +79,11 @@ bool __read_frequently has_sstc; bool __read_frequently has_sscofpmf; bool has_svpbmt; +/* 
Z-extensions support. */ +bool has_zicbom; +bool has_zicboz; +bool has_zicbop; + struct cpu_desc { const char *cpu_mvendor_name; const char *cpu_march_name; @@ -89,6 +95,12 @@ struct cpu_desc { #define SV_SVPBMT (1 << 2) #define SV_SVINVAL (1 << 3) #define SV_SSCOFPMF (1 << 4) + u_int z_extensions; /* Multi-letter extensions. */ +#define Z_ZICBOM (1 << 0) +#define Z_ZICBOZ (1 << 1) +#define Z_ZICBOP (1 << 2) + int cbom_block_size; + int cboz_block_size; }; struct cpu_desc cpu_desc[MAXCPU]; @@ -114,6 +126,7 @@ static const struct marchid_entry global_marchids[] = { static const struct marchid_entry sifive_marchids[] = { { MARCHID_SIFIVE_U7, "6/7/P200/X200-Series Processor" }, + { MARCHID_SIFIVE_P5, "P550/P650 Processor" }, MARCHID_END }; @@ -196,11 +209,24 @@ parse_ext_x(struct cpu_desc *desc __unused, char *isa, int idx, int len) static __inline int parse_ext_z(struct cpu_desc *desc __unused, char *isa, int idx, int len) { +#define CHECK_Z_EXT(str, flag) \ + do { \ + if (strncmp(&isa[idx], (str), \ + MIN(strlen(str), len - idx)) == 0) { \ + desc->z_extensions |= flag; \ + return (idx + strlen(str)); \ + } \ + } while (0) + + /* Check for known/supported extensions. */ + CHECK_Z_EXT("zicbom", Z_ZICBOM); + CHECK_Z_EXT("zicboz", Z_ZICBOZ); + CHECK_Z_EXT("zicbop", Z_ZICBOP); + +#undef CHECK_Z_EXT /* * Proceed to the next multi-letter extension or the end of the * string. - * - * TODO: parse some of these. 
*/ while (isa[idx] != '_' && idx < len) { idx++; @@ -322,6 +348,22 @@ parse_mmu_fdt(struct cpu_desc *desc, phandle_t node) } static void +parse_cbo_fdt(struct cpu_desc *desc, phandle_t node) +{ + int error; + + error = OF_getencprop(node, "riscv,cbom-block-size", + &desc->cbom_block_size, sizeof(desc->cbom_block_size)); + if (error == -1) + desc->cbom_block_size = 0; + + error = OF_getencprop(node, "riscv,cboz-block-size", + &desc->cboz_block_size, sizeof(desc->cboz_block_size)); + if (error == -1) + desc->cboz_block_size = 0; +} + +static void identify_cpu_features_fdt(u_int cpu, struct cpu_desc *desc) { char isa[1024]; @@ -372,6 +414,9 @@ identify_cpu_features_fdt(u_int cpu, struct cpu_desc *desc) /* Check MMU features. */ parse_mmu_fdt(desc, node); + /* Cache-block operations (CBO). */ + parse_cbo_fdt(desc, node); + /* We are done. */ break; } @@ -422,6 +467,11 @@ update_global_capabilities(u_int cpu, struct cpu_desc *desc) UPDATE_CAP(has_sscofpmf, (desc->smode_extensions & SV_SSCOFPMF) != 0); UPDATE_CAP(has_svpbmt, (desc->smode_extensions & SV_SVPBMT) != 0); + /* Z extension support. 
*/ + UPDATE_CAP(has_zicbom, (desc->z_extensions & Z_ZICBOM) != 0); + UPDATE_CAP(has_zicboz, (desc->z_extensions & Z_ZICBOZ) != 0); + UPDATE_CAP(has_zicbop, (desc->z_extensions & Z_ZICBOP) != 0); + #undef UPDATE_CAP } @@ -506,6 +556,9 @@ identify_cpu(u_int cpu) update_global_capabilities(cpu, desc); handle_cpu_quirks(cpu, desc); + + if (has_zicbom && cpu == 0) + cbo_zicbom_setup_cache(desc->cbom_block_size); } void diff --git a/sys/riscv/riscv/intc.c b/sys/riscv/riscv/intc.c index 248175e8bea3..b700b9c97793 100644 --- a/sys/riscv/riscv/intc.c +++ b/sys/riscv/riscv/intc.c @@ -127,7 +127,7 @@ intc_identify(driver_t *driver, device_t parent) device_t dev; phandle_t node; - if (device_find_child(parent, "intc", -1) != NULL) + if (device_find_child(parent, "intc", DEVICE_UNIT_ANY) != NULL) return; node = intc_ofw_find(parent, PCPU_GET(hart)); diff --git a/sys/riscv/riscv/machdep.c b/sys/riscv/riscv/machdep.c index fea4ca9a7b92..235cc651b87e 100644 --- a/sys/riscv/riscv/machdep.c +++ b/sys/riscv/riscv/machdep.c @@ -44,7 +44,6 @@ #include <sys/bus.h> #include <sys/cons.h> #include <sys/cpu.h> -#include <sys/devmap.h> #include <sys/efi_map.h> #include <sys/exec.h> #include <sys/imgact.h> @@ -157,8 +156,6 @@ cpu_startup(void *dummy) printf("avail memory = %ju (%ju MB)\n", ptoa((uintmax_t)vm_free_count()), ptoa((uintmax_t)vm_free_count()) / (1024 * 1024)); - if (bootverbose) - devmap_print_table(); bufinit(); vm_pager_bufferinit(); @@ -506,15 +503,65 @@ parse_metadata(void) return (lastaddr); } +#ifdef FDT +static void +fdt_physmem_hardware_region_cb(const struct mem_region *mr, void *arg) +{ + bool *first = arg; + + physmem_hardware_region(mr->mr_start, mr->mr_size); + + if (*first) { + /* + * XXX: Unconditionally exclude the lowest 2MB of physical + * memory, as this area is assumed to contain the SBI firmware, + * and this is not properly reserved in all cases (e.g. in + * older firmware like BBL). 
+ * + * This is a little fragile, but it is consistent with the + * platforms we support so far. + * + * TODO: remove this when the all regular booting methods + * properly report their reserved memory in the device tree. + */ + physmem_exclude_region(mr->mr_start, L2_SIZE, + EXFLAG_NODUMP | EXFLAG_NOALLOC); + *first = false; + } +} + +static void +fdt_physmem_exclude_region_cb(const struct mem_region *mr, void *arg __unused) +{ + physmem_exclude_region(mr->mr_start, mr->mr_size, + EXFLAG_NODUMP | EXFLAG_NOALLOC); +} +#endif + +static void +efi_exclude_sbi_pmp_cb(struct efi_md *p, void *argp) +{ + bool *first = (bool *)argp; + + if (!*first) + return; + + *first = false; + if (p->md_type == EFI_MD_TYPE_BS_DATA) { + physmem_exclude_region(p->md_phys, + min(p->md_pages * EFI_PAGE_SIZE, L2_SIZE), + EXFLAG_NOALLOC); + } +} + void initriscv(struct riscv_bootparams *rvbp) { - struct mem_region mem_regions[FDT_MEM_REGIONS]; struct efi_map_header *efihdr; struct pcpu *pcpup; - int mem_regions_sz; vm_offset_t lastaddr; vm_size_t kernlen; + bool first; char *env; TSRAW(&thread0, TS_ENTER, __func__, NULL); @@ -544,34 +591,31 @@ initriscv(struct riscv_bootparams *rvbp) if (efihdr != NULL) { efi_map_add_entries(efihdr); efi_map_exclude_entries(efihdr); + + /* + * OpenSBI uses the first PMP entry to prevent buggy supervisor + * software from overwriting the firmware. However, this + * region may not be properly marked as reserved, leading + * to an access violation exception whenever the kernel + * attempts to write to a page from that region. + * + * Fix this by excluding first EFI memory map entry + * if it is marked as "BootServicesData". + */ + first = true; + efi_map_foreach_entry(efihdr, efi_exclude_sbi_pmp_cb, &first); } #ifdef FDT else { /* Exclude reserved memory specified by the device tree. 
*/ - if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) { - physmem_exclude_regions(mem_regions, mem_regions_sz, - EXFLAG_NODUMP | EXFLAG_NOALLOC); - } + fdt_foreach_reserved_mem(fdt_physmem_exclude_region_cb, NULL); /* Grab physical memory regions information from device tree. */ - if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) + first = true; + if (fdt_foreach_mem_region(fdt_physmem_hardware_region_cb, + &first) != 0) panic("Cannot get physical memory regions"); - physmem_hardware_regions(mem_regions, mem_regions_sz); - /* - * XXX: Unconditionally exclude the lowest 2MB of physical - * memory, as this area is assumed to contain the SBI firmware, - * and this is not properly reserved in all cases (e.g. in - * older firmware like BBL). - * - * This is a little fragile, but it is consistent with the - * platforms we support so far. - * - * TODO: remove this when the all regular booting methods - * properly report their reserved memory in the device tree. - */ - physmem_exclude_region(mem_regions[0].mr_start, L2_SIZE, - EXFLAG_NODUMP | EXFLAG_NOALLOC); } #endif @@ -589,9 +633,6 @@ initriscv(struct riscv_bootparams *rvbp) physmem_init_kernel_globals(); - /* Establish static device mappings */ - devmap_bootstrap(); - cninit(); /* diff --git a/sys/riscv/riscv/nexus.c b/sys/riscv/riscv/nexus.c index 836e1648edeb..d08274aba9b2 100644 --- a/sys/riscv/riscv/nexus.c +++ b/sys/riscv/riscv/nexus.c @@ -100,6 +100,7 @@ static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_fdt_probe), DEVMETHOD(device_attach, nexus_attach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), /* OFW interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c index c8a69f9674e9..26efaecc64d1 100644 --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -183,12 +183,13 @@ #define pmap_l1_pindex(v) (NUL2E + ((v) >> L1_SHIFT)) #define pmap_l2_pindex(v) ((v) >> 
L2_SHIFT) +#define pa_index(pa) ((pa) >> L2_SHIFT) #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) #define NPV_LIST_LOCKS MAXCPU #define PHYS_TO_PV_LIST_LOCK(pa) \ - (&pv_list_locks[pmap_l2_pindex(pa) % NPV_LIST_LOCKS]) + (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS]) #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ struct rwlock **_lockp = (lockp); \ @@ -240,6 +241,11 @@ vm_paddr_t dmap_phys_base; /* The start of the dmap region */ vm_paddr_t dmap_phys_max; /* The limit of the dmap region */ vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */ +static int pmap_growkernel_panic = 0; +SYSCTL_INT(_vm_pmap, OID_AUTO, growkernel_panic, CTLFLAG_RDTUN, + &pmap_growkernel_panic, 0, + "panic on failure to allocate kernel page table page"); + /* This code assumes all L1 DMAP entries will be used */ CTASSERT((DMAP_MIN_ADDRESS & ~L1_OFFSET) == DMAP_MIN_ADDRESS); CTASSERT((DMAP_MAX_ADDRESS & ~L1_OFFSET) == DMAP_MAX_ADDRESS); @@ -1872,8 +1878,8 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG | CTLFLAG_RD | CTLFLAG_MPSAFE, /* * grow the number of kernel page table entries, if needed */ -void -pmap_growkernel(vm_offset_t addr) +static int +pmap_growkernel_nopanic(vm_offset_t addr) { vm_paddr_t paddr; vm_page_t nkpg; @@ -1893,7 +1899,8 @@ pmap_growkernel(vm_offset_t addr) nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) - panic("%s: no memory to grow kernel", __func__); + return (KERN_RESOURCE_SHORTAGE); + nkpg->pindex = pmap_l1_pindex(kernel_vm_end); paddr = VM_PAGE_TO_PHYS(nkpg); @@ -1919,7 +1926,7 @@ pmap_growkernel(vm_offset_t addr) nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO); if (nkpg == NULL) - panic("%s: no memory to grow kernel", __func__); + return (KERN_RESOURCE_SHORTAGE); nkpg->pindex = pmap_l2_pindex(kernel_vm_end); paddr = VM_PAGE_TO_PHYS(nkpg); @@ -1936,6 +1943,19 @@ pmap_growkernel(vm_offset_t addr) break; } } + + 
return (KERN_SUCCESS); +} + +int +pmap_growkernel(vm_offset_t addr) +{ + int rv; + + rv = pmap_growkernel_nopanic(addr); + if (rv != KERN_SUCCESS && pmap_growkernel_panic) + panic("pmap_growkernel: no memory to grow kernel"); + return (rv); } /*************************************************** @@ -3614,31 +3634,33 @@ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { + struct pctrie_iter pages; struct rwlock *lock; vm_offset_t va; vm_page_t m, mpte; - vm_pindex_t diff, psize; int rv; VM_OBJECT_ASSERT_LOCKED(m_start->object); - psize = atop(end - start); mpte = NULL; - m = m_start; + vm_page_iter_limit_init(&pages, m_start->object, + m_start->pindex + atop(end - start)); + m = vm_radix_iter_lookup(&pages, m_start->pindex); lock = NULL; rw_rlock(&pvh_global_lock); PMAP_LOCK(pmap); - while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { - va = start + ptoa(diff); + while (m != NULL) { + va = start + ptoa(m->pindex - m_start->pindex); if ((va & L2_OFFSET) == 0 && va + L2_SIZE <= end && m->psind == 1 && pmap_ps_enabled(pmap) && ((rv = pmap_enter_2mpage(pmap, va, m, prot, &lock)) == - KERN_SUCCESS || rv == KERN_NO_SPACE)) - m = &m[L2_SIZE / PAGE_SIZE - 1]; - else + KERN_SUCCESS || rv == KERN_NO_SPACE)) { + m = vm_radix_iter_jump(&pages, L2_SIZE / PAGE_SIZE); + } else { mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); - m = TAILQ_NEXT(m, listq); + m = vm_radix_iter_step(&pages); + } } if (lock != NULL) rw_wunlock(lock); @@ -4816,6 +4838,8 @@ pmap_unmapbios(void *p, vm_size_t size) void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) { + if (m->md.pv_memattr == ma) + return; m->md.pv_memattr = ma; diff --git a/sys/riscv/riscv/sbi.c b/sys/riscv/riscv/sbi.c index b0a05bd88ef1..2d8e96a22bbe 100644 --- a/sys/riscv/riscv/sbi.c +++ b/sys/riscv/riscv/sbi.c @@ -351,10 +351,10 @@ sbi_identify(driver_t *driver, device_t parent) { device_t dev; - if (device_find_child(parent, "sbi", -1) != 
NULL) + if (device_find_child(parent, "sbi", DEVICE_UNIT_ANY) != NULL) return; - dev = BUS_ADD_CHILD(parent, 0, "sbi", -1); + dev = BUS_ADD_CHILD(parent, 0, "sbi", DEVICE_UNIT_ANY); if (dev == NULL) device_printf(parent, "Can't add sbi child\n"); } @@ -389,7 +389,7 @@ sbi_attach(device_t dev) #ifdef SMP di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO); resource_list_init(&di->rl); - child = device_add_child(dev, "sbi_ipi", -1); + child = device_add_child(dev, "sbi_ipi", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "Could not add sbi_ipi child\n"); return (ENXIO); diff --git a/sys/riscv/riscv/trap.c b/sys/riscv/riscv/trap.c index a1a30eb58220..d6df1245a24e 100644 --- a/sys/riscv/riscv/trap.c +++ b/sys/riscv/riscv/trap.c @@ -239,7 +239,7 @@ page_fault_handler(struct trapframe *frame, int usermode) * Enable interrupts for the duration of the page fault. For * user faults this was done already in do_trap_user(). */ - if ((frame->tf_sstatus & SSTATUS_SIE) != 0) + if ((frame->tf_sstatus & SSTATUS_SPIE) != 0) intr_enable(); if (stval >= VM_MIN_KERNEL_ADDRESS) { diff --git a/sys/riscv/riscv/vm_machdep.c b/sys/riscv/riscv/vm_machdep.c index cf9b85493e39..e5a5cf31af15 100644 --- a/sys/riscv/riscv/vm_machdep.c +++ b/sys/riscv/riscv/vm_machdep.c @@ -200,7 +200,7 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg, } int -cpu_set_user_tls(struct thread *td, void *tls_base) +cpu_set_user_tls(struct thread *td, void *tls_base, int thr_flags __unused) { if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) diff --git a/sys/riscv/sifive/sifive_gpio.c b/sys/riscv/sifive/sifive_gpio.c index ef68d2b39da3..98bff2f72082 100644 --- a/sys/riscv/sifive/sifive_gpio.c +++ b/sys/riscv/sifive/sifive_gpio.c @@ -157,13 +157,14 @@ sfgpio_attach(device_t dev) sc->gpio_pins[i].gp_name[GPIOMAXNAME - 1] = '\0'; } - sc->busdev = gpiobus_attach_bus(dev); + sc->busdev = gpiobus_add_bus(dev); if (sc->busdev == NULL) { device_printf(dev, "Cannot attach gpiobus\n"); error = 
ENXIO; goto fail; } + bus_attach_children(dev); return (0); fail: diff --git a/sys/riscv/starfive/files.starfive b/sys/riscv/starfive/files.starfive index 57d4618d00f9..4db35e8cb351 100644 --- a/sys/riscv/starfive/files.starfive +++ b/sys/riscv/starfive/files.starfive @@ -9,4 +9,6 @@ dev/eqos/if_eqos.c optional eqos dev/eqos/if_eqos_if.m optional eqos dev/eqos/if_eqos_starfive.c optional eqos +riscv/starfive/jh7110_gpio.c standard +riscv/starfive/jh7110_pcie.c optional pci fdt riscv/starfive/starfive_syscon.c standard diff --git a/sys/riscv/starfive/jh7110_gpio.c b/sys/riscv/starfive/jh7110_gpio.c new file mode 100644 index 000000000000..1ed7d9f42259 --- /dev/null +++ b/sys/riscv/starfive/jh7110_gpio.c @@ -0,0 +1,368 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2023 Jari Sihvola <jsihv@gmx.com> + */ + +#include <sys/cdefs.h> + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> + +#include <sys/gpio.h> +#include <sys/kernel.h> +#include <sys/lock.h> +#include <sys/module.h> +#include <sys/mutex.h> +#include <sys/rman.h> + +#include <machine/bus.h> +#include <machine/intr.h> +#include <machine/resource.h> + +#include <dev/clk/clk.h> +#include <dev/gpio/gpiobusvar.h> +#include <dev/ofw/ofw_bus.h> +#include <dev/ofw/ofw_bus_subr.h> + +#include "gpio_if.h" + +#define GPIO_PINS 64 +#define GPIO_REGS 2 + +#define GP0_DOEN_CFG 0x0 +#define GP0_DOUT_CFG 0x40 +#define GPIOEN 0xdc +#define GPIOE_0 0x100 +#define GPIOE_1 0x104 +#define GPIO_DIN_LOW 0x118 +#define GPIO_DIN_HIGH 0x11c +#define IOMUX_SYSCFG_288 0x120 + +#define PAD_INPUT_EN (1 << 0) +#define PAD_PULLUP (1 << 3) +#define PAD_PULLDOWN (1 << 4) +#define PAD_HYST (1 << 6) + +#define ENABLE_MASK 0x3f +#define DATA_OUT_MASK 0x7f +#define DIROUT_DISABLE 1 + +struct jh7110_gpio_softc { + device_t dev; + device_t busdev; + struct mtx mtx; + struct resource *res; + clk_t clk; +}; + +static struct ofw_compat_data compat_data[] = { + {"starfive,jh7110-sys-pinctrl", 1}, + {NULL, 0} 
+}; + +static struct resource_spec jh7110_gpio_spec[] = { + { SYS_RES_MEMORY, 0, RF_ACTIVE }, + { -1, 0 } +}; + +#define GPIO_RW_OFFSET(_val) (_val & ~3) +#define GPIO_SHIFT(_val) ((_val & 3) * 8) +#define PAD_OFFSET(_val) (_val * 4) + +#define JH7110_GPIO_LOCK(_sc) mtx_lock(&(_sc)->mtx) +#define JH7110_GPIO_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) + +#define JH7110_GPIO_READ(sc, reg) bus_read_4((sc)->res, (reg)) +#define JH7110_GPIO_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) + +static device_t +jh7110_gpio_get_bus(device_t dev) +{ + struct jh7110_gpio_softc *sc; + + sc = device_get_softc(dev); + + return (sc->busdev); +} + +static int +jh7110_gpio_pin_max(device_t dev, int *maxpin) +{ + *maxpin = GPIO_PINS - 1; + + return (0); +} + +static int +jh7110_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) +{ + struct jh7110_gpio_softc *sc; + uint32_t reg; + + sc = device_get_softc(dev); + + if (pin >= GPIO_PINS) + return (EINVAL); + + JH7110_GPIO_LOCK(sc); + if (pin < GPIO_PINS / GPIO_REGS) { + reg = JH7110_GPIO_READ(sc, GPIO_DIN_LOW); + *val = (reg >> pin) & 0x1; + } else { + reg = JH7110_GPIO_READ(sc, GPIO_DIN_HIGH); + *val = (reg >> (pin - GPIO_PINS / GPIO_REGS)) & 0x1; + } + JH7110_GPIO_UNLOCK(sc); + + return (0); +} + +static int +jh7110_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) +{ + struct jh7110_gpio_softc *sc; + uint32_t reg; + + sc = device_get_softc(dev); + + if (pin >= GPIO_PINS) + return (EINVAL); + + JH7110_GPIO_LOCK(sc); + reg = JH7110_GPIO_READ(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin)); + reg &= ~(DATA_OUT_MASK << GPIO_SHIFT(pin)); + if (value) + reg |= 0x1 << GPIO_SHIFT(pin); + JH7110_GPIO_WRITE(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin), reg); + JH7110_GPIO_UNLOCK(sc); + + return (0); +} + +static int +jh7110_gpio_pin_toggle(device_t dev, uint32_t pin) +{ + struct jh7110_gpio_softc *sc; + uint32_t reg; + + sc = device_get_softc(dev); + + if (pin >= GPIO_PINS) + return (EINVAL); + + JH7110_GPIO_LOCK(sc); + reg = 
JH7110_GPIO_READ(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin)); + if (reg & 0x1 << GPIO_SHIFT(pin)) { + reg &= ~(DATA_OUT_MASK << GPIO_SHIFT(pin)); + } else { + reg &= ~(DATA_OUT_MASK << GPIO_SHIFT(pin)); + reg |= 0x1 << GPIO_SHIFT(pin); + } + JH7110_GPIO_WRITE(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin), reg); + JH7110_GPIO_UNLOCK(sc); + + return (0); +} + +static int +jh7110_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) +{ + if (pin >= GPIO_PINS) + return (EINVAL); + + *caps = (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT); + + return (0); +} + +static int +jh7110_gpio_pin_getname(device_t dev, uint32_t pin, char *name) +{ + if (pin >= GPIO_PINS) + return (EINVAL); + + snprintf(name, GPIOMAXNAME, "GPIO%d", pin); + + return (0); +} + +static int +jh7110_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) +{ + struct jh7110_gpio_softc *sc; + uint32_t reg; + + sc = device_get_softc(dev); + + if (pin >= GPIO_PINS) + return (EINVAL); + + /* Reading the direction */ + JH7110_GPIO_LOCK(sc); + reg = JH7110_GPIO_READ(sc, GP0_DOEN_CFG + GPIO_RW_OFFSET(pin)); + if ((reg & ENABLE_MASK << GPIO_SHIFT(pin)) == 0) + *flags |= GPIO_PIN_OUTPUT; + else + *flags |= GPIO_PIN_INPUT; + JH7110_GPIO_UNLOCK(sc); + + return (0); +} + +static int +jh7110_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) +{ + struct jh7110_gpio_softc *sc; + uint32_t reg; + + sc = device_get_softc(dev); + + if (pin >= GPIO_PINS) + return (EINVAL); + + /* Setting the direction, enable or disable output, configuring pads */ + + JH7110_GPIO_LOCK(sc); + + if (flags & GPIO_PIN_INPUT) { + reg = JH7110_GPIO_READ(sc, IOMUX_SYSCFG_288 + PAD_OFFSET(pin)); + reg |= (PAD_INPUT_EN | PAD_HYST); + JH7110_GPIO_WRITE(sc, IOMUX_SYSCFG_288 + PAD_OFFSET(pin), reg); + } + + reg = JH7110_GPIO_READ(sc, GP0_DOEN_CFG + GPIO_RW_OFFSET(pin)); + reg &= ~(ENABLE_MASK << GPIO_SHIFT(pin)); + if (flags & GPIO_PIN_INPUT) { + reg |= DIROUT_DISABLE << GPIO_SHIFT(pin); + } + JH7110_GPIO_WRITE(sc, GP0_DOEN_CFG + 
GPIO_RW_OFFSET(pin), reg); + + if (flags & GPIO_PIN_OUTPUT) { + reg = JH7110_GPIO_READ(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin)); + reg &= ~(ENABLE_MASK << GPIO_SHIFT(pin)); + reg |= 0x1 << GPIO_SHIFT(pin); + JH7110_GPIO_WRITE(sc, GP0_DOUT_CFG + GPIO_RW_OFFSET(pin), reg); + + reg = JH7110_GPIO_READ(sc, IOMUX_SYSCFG_288 + PAD_OFFSET(pin)); + reg &= ~(PAD_INPUT_EN | PAD_PULLUP | PAD_PULLDOWN | PAD_HYST); + JH7110_GPIO_WRITE(sc, IOMUX_SYSCFG_288 + PAD_OFFSET(pin), reg); + } + + JH7110_GPIO_UNLOCK(sc); + + return (0); +} + +static int +jh7110_gpio_probe(device_t dev) +{ + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) + return (ENXIO); + + device_set_desc(dev, "StarFive JH7110 GPIO controller"); + + return (BUS_PROBE_DEFAULT); +} + +static int +jh7110_gpio_detach(device_t dev) +{ + struct jh7110_gpio_softc *sc; + + sc = device_get_softc(dev); + + bus_release_resources(dev, jh7110_gpio_spec, &sc->res); + if (sc->busdev != NULL) + gpiobus_detach_bus(dev); + if (sc->clk != NULL) + clk_release(sc->clk); + mtx_destroy(&sc->mtx); + + return (0); +} + +static int +jh7110_gpio_attach(device_t dev) +{ + struct jh7110_gpio_softc *sc; + + sc = device_get_softc(dev); + sc->dev = dev; + + mtx_init(&sc->mtx, device_get_nameunit(sc->dev), NULL, MTX_DEF); + + if (bus_alloc_resources(dev, jh7110_gpio_spec, &sc->res) != 0) { + device_printf(dev, "Could not allocate resources\n"); + bus_release_resources(dev, jh7110_gpio_spec, &sc->res); + mtx_destroy(&sc->mtx); + return (ENXIO); + } + + if (clk_get_by_ofw_index(dev, 0, 0, &sc->clk) != 0) { + device_printf(dev, "Cannot get clock\n"); + jh7110_gpio_detach(dev); + return (ENXIO); + } + + if (clk_enable(sc->clk) != 0) { + device_printf(dev, "Could not enable clock %s\n", + clk_get_name(sc->clk)); + jh7110_gpio_detach(dev); + return (ENXIO); + } + + /* Reseting GPIO interrupts */ + JH7110_GPIO_WRITE(sc, GPIOE_0, 0); + JH7110_GPIO_WRITE(sc, GPIOE_1, 0); + 
JH7110_GPIO_WRITE(sc, GPIOEN, 1); + + sc->busdev = gpiobus_add_bus(dev); + if (sc->busdev == NULL) { + device_printf(dev, "Cannot attach gpiobus\n"); + jh7110_gpio_detach(dev); + return (ENXIO); + } + + bus_attach_children(dev); + return (0); +} + +static phandle_t +jh7110_gpio_get_node(device_t bus, device_t dev) +{ + return (ofw_bus_get_node(bus)); +} + +static device_method_t jh7110_gpio_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, jh7110_gpio_probe), + DEVMETHOD(device_attach, jh7110_gpio_attach), + DEVMETHOD(device_detach, jh7110_gpio_detach), + + /* GPIO protocol */ + DEVMETHOD(gpio_get_bus, jh7110_gpio_get_bus), + DEVMETHOD(gpio_pin_max, jh7110_gpio_pin_max), + DEVMETHOD(gpio_pin_get, jh7110_gpio_pin_get), + DEVMETHOD(gpio_pin_set, jh7110_gpio_pin_set), + DEVMETHOD(gpio_pin_toggle, jh7110_gpio_pin_toggle), + DEVMETHOD(gpio_pin_getflags, jh7110_gpio_pin_getflags), + DEVMETHOD(gpio_pin_setflags, jh7110_gpio_pin_setflags), + DEVMETHOD(gpio_pin_getcaps, jh7110_gpio_pin_getcaps), + DEVMETHOD(gpio_pin_getname, jh7110_gpio_pin_getname), + + /* ofw_bus interface */ + DEVMETHOD(ofw_bus_get_node, jh7110_gpio_get_node), + + DEVMETHOD_END +}; + +DEFINE_CLASS_0(gpio, jh7110_gpio_driver, jh7110_gpio_methods, + sizeof(struct jh7110_gpio_softc)); +EARLY_DRIVER_MODULE(jh7110_gpio, simplebus, jh7110_gpio_driver, 0, 0, + BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE); +MODULE_DEPEND(jh7110_gpio, gpiobus, 1, 1, 1); diff --git a/sys/riscv/starfive/jh7110_pcie.c b/sys/riscv/starfive/jh7110_pcie.c new file mode 100644 index 000000000000..5181252ab2dc --- /dev/null +++ b/sys/riscv/starfive/jh7110_pcie.c @@ -0,0 +1,1037 @@ +/* + * SPDX-License-Identifier: BSD-2-Clause + * + * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com> + */ + +/* JH7110 PCIe controller driver */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/bus.h> +#include <sys/gpio.h> +#include <sys/kernel.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/module.h> +#include 
<sys/mutex.h> +#include <sys/proc.h> +#include <sys/rman.h> + +#include <machine/bus.h> +#include <machine/intr.h> +#include <machine/resource.h> + +#include <dev/clk/clk.h> +#include <dev/gpio/gpiobusvar.h> +#include <dev/hwreset/hwreset.h> +#include <dev/regulator/regulator.h> +#include <dev/syscon/syscon.h> +#include <dev/ofw/ofw_bus.h> +#include <dev/ofw/ofw_bus_subr.h> +#include <dev/ofw/ofw_pci.h> +#include <dev/ofw/ofwpci.h> +#include <dev/pci/pci_host_generic.h> +#include <dev/pci/pcireg.h> +#include <dev/pci/pcivar.h> +#include <dev/pci/pcib_private.h> + +#include "msi_if.h" +#include "ofw_bus_if.h" +#include "pcib_if.h" +#include "pic_if.h" +#include "syscon_if.h" + +#define IRQ_LOCAL_MASK 0x180 +#define IRQ_LOCAL_STATUS 0x184 +#define IRQ_MSI_BASE 0x190 +#define IRQ_MSI_STATUS 0x194 + +#define MSI_MASK 0x10000000 +#define INTX_MASK 0xf000000 +#define ERROR_MASK 0x80770000 + +#define MSI_COUNT 32 +#define MSI_USED 0x1 +#define MSI_PCIE0_MASK_OFFSET 0xa0; +#define MSI_PCIE1_MASK_OFFSET 0xf0; + +#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800 +#define ATR0_AXI4_SLV0_SRC_ADDR 0x804 +#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808 +#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810 +#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80c +#define ATR_ENTRY_SIZE 0x20 +#define ATR0_PCIE_ATR_SIZE 0x25 +#define ATR0_PCIE_ATR_SIZE_SHIFT 1 +#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600 +#define ATR0_PCIE_WIN0_SRC_ADDR 0x604 +#define ATR0_ENABLE 1 + +#define PCIE_TXRX_INTERFACE 0x0 +#define PCIE_CONF_INTERFACE 0x1 +#define PCIE_WINCONF 0xfc +#define PREF_MEM_WIN_64_SUPPORT (1U << 3) + +#define STG_AXI4_SLVL_AW_MASK 0x7fff +#define STG_AXI4_SLVL_AR_MASK 0x7fff00 +#define STG_PCIE0_BASE 0x48 +#define STG_PCIE1_BASE 0x1f8 +#define STG_RP_NEP_OFFSET 0xe8 +#define STG_K_RP_NEP (1U << 8) +#define STG_CKREF_MASK 0xC0000 +#define STG_CKREF_VAL 0x80000 +#define STG_CLKREQ (1U << 22) +#define STG_AR_OFFSET 0x78 +#define STG_AW_OFFSET 0x7c +#define STG_AXI4_SLVL_ARFUNC_SHIFT 0x8 +#define STG_LNKSTA_OFFSET 0x170 
+#define STG_LINK_UP (1U << 5) + +#define PHY_FUNC_SHIFT 9 +#define PHY_FUNC_DIS (1U << 15) +#define PCI_MISC_REG 0xb4 +#define PCI_GENERAL_SETUP_REG 0x80 +#define PCI_CONF_SPACE_REGS 0x1000 +#define ROOTPORT_ENABLE 0x1 +#define PMSG_RX_SUPPORT_REG 0x3f0 +#define PMSG_LTR_SUPPORT (1U << 2) +#define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_IDS_CLASS_CODE_SHIFT 16 +#define PCIE_PCI_IDS_REG 0x9c +#define REV_ID_MASK 0xff + +#define PLDA_AXI_POST_ERR (1U << 16) +#define PLDA_AXI_FETCH_ERR (1U << 17) +#define PLDA_AXI_DISCARD_ERR (1U << 18) +#define PLDA_PCIE_POST_ERR (1U << 20) +#define PLDA_PCIE_FETCH_ERR (1U << 21) +#define PLDA_PCIE_DISCARD_ERR (1U << 22) +#define PLDA_SYS_ERR (1U << 31) + +/* Compatible devices. */ +static struct ofw_compat_data compat_data[] = { + {"starfive,jh7110-pcie", 1}, + {NULL, 0}, +}; + +struct jh7110_pcie_irqsrc { + struct intr_irqsrc isrc; + u_int irq; + u_int is_used; +}; + +struct jh7110_pcie_softc { + struct ofw_pci_softc ofw_pci; + device_t dev; + phandle_t node; + + struct resource *reg_mem_res; + struct resource *cfg_mem_res; + struct resource *irq_res; + struct jh7110_pcie_irqsrc *isrcs; + void *irq_cookie; + struct syscon *stg_syscon; + uint64_t stg_baddr; + + struct ofw_pci_range range_mem32; + struct ofw_pci_range range_mem64; + + struct mtx msi_mtx; + uint64_t msi_mask_offset; + + gpio_pin_t perst_pin; + + clk_t clk_noc; + clk_t clk_tl; + clk_t clk_axi; + clk_t clk_apb; + + hwreset_t rst_mst0; + hwreset_t rst_slv0; + hwreset_t rst_slv; + hwreset_t rst_brg; + hwreset_t rst_core; + hwreset_t rst_apb; +}; + +#define LOW32(val) (uint32_t)(val) +#define HI32(val) (uint32_t)(val >> 32) + +#define RD4(sc, reg) bus_read_4((sc)->reg_mem_res, (reg)) +#define WR4(sc, reg, val) bus_write_4((sc)->reg_mem_res, (reg), (val)) + +static uint32_t +jh7110_pcie_read_config(device_t dev, u_int bus, u_int slot, u_int func, + u_int reg, int bytes) +{ + struct jh7110_pcie_softc *sc; + uint32_t data, offset; + + sc = device_get_softc(dev); + offset = 
PCIE_ADDR_OFFSET(bus, slot, func, reg); + + /* Certain config registers are not supposed to be accessed from here */ + if (bus == 0 && (offset == PCIR_BAR(0) || offset == PCIR_BAR(1))) + return (~0U); + + switch (bytes) { + case 1: + data = bus_read_1(sc->cfg_mem_res, offset); + break; + case 2: + data = le16toh(bus_read_2(sc->cfg_mem_res, offset)); + break; + case 4: + data = le32toh(bus_read_4(sc->cfg_mem_res, offset)); + break; + default: + return (~0U); + } + + return (data); +} + +static void +jh7110_pcie_write_config(device_t dev, u_int bus, u_int slot, u_int func, + u_int reg, uint32_t val, int bytes) +{ + struct jh7110_pcie_softc *sc; + uint32_t offset; + + sc = device_get_softc(dev); + offset = PCIE_ADDR_OFFSET(bus, slot, func, reg); + + /* Certain config registers are not supposed to be accessed from here */ + if (bus == 0 && (offset == PCIR_BAR(0) || offset == PCIR_BAR(1))) + return; + + switch (bytes) { + case 1: + bus_write_1(sc->cfg_mem_res, offset, val); + break; + case 2: + bus_write_2(sc->cfg_mem_res, offset, htole16(val)); + break; + case 4: + bus_write_4(sc->cfg_mem_res, offset, htole32(val)); + break; + default: + return; + } +} + +static int +jh7110_pcie_intr(void *arg) +{ + struct jh7110_pcie_softc *sc; + struct trapframe *tf; + struct jh7110_pcie_irqsrc *irq; + uint32_t reg, irqbits; + int err, i; + + sc = (struct jh7110_pcie_softc *)arg; + tf = curthread->td_intr_frame; + + reg = RD4(sc, IRQ_LOCAL_STATUS); + if (reg == 0) + return (ENXIO); + + if ((reg & MSI_MASK) != 0) { + WR4(sc, IRQ_LOCAL_STATUS, MSI_MASK); + + irqbits = RD4(sc, IRQ_MSI_STATUS); + for (i = 0; irqbits != 0; i++) { + if ((irqbits & (1U << i)) != 0) { + irq = &sc->isrcs[i]; + err = intr_isrc_dispatch(&irq->isrc, tf); + if (err != 0) + device_printf(sc->dev, + "MSI 0x%x gives error %d\n", + i, err); + irqbits &= ~(1U << i); + } + } + } + if ((reg & INTX_MASK) != 0) { + irqbits = (reg & INTX_MASK); + WR4(sc, IRQ_LOCAL_STATUS, irqbits); + } + if ((reg & ERROR_MASK) != 0) { + 
irqbits = (reg & ERROR_MASK); + if ((reg & PLDA_AXI_POST_ERR) != 0) + device_printf(sc->dev, "axi post error\n"); + if ((reg & PLDA_AXI_FETCH_ERR) != 0) + device_printf(sc->dev, "axi fetch error\n"); + if ((reg & PLDA_AXI_DISCARD_ERR) != 0) + device_printf(sc->dev, "axi discard error\n"); + if ((reg & PLDA_PCIE_POST_ERR) != 0) + device_printf(sc->dev, "pcie post error\n"); + if ((reg & PLDA_PCIE_FETCH_ERR) != 0) + device_printf(sc->dev, "pcie fetch error\n"); + if ((reg & PLDA_PCIE_DISCARD_ERR) != 0) + device_printf(sc->dev, "pcie discard error\n"); + if ((reg & PLDA_SYS_ERR) != 0) + device_printf(sc->dev, "pcie sys error\n"); + WR4(sc, IRQ_LOCAL_STATUS, irqbits); + } + + return (FILTER_HANDLED); +} + +static int +jh7110_pcie_route_interrupt(device_t bus, device_t dev, int pin) +{ + struct jh7110_pcie_softc *sc; + u_int irq; + + sc = device_get_softc(bus); + irq = intr_map_clone_irq(rman_get_start(sc->irq_res)); + device_printf(bus, "route pin %d for device %d.%d to %u\n", + pin, pci_get_slot(dev), pci_get_function(dev), irq); + + return (irq); +} + +static int +jh7110_pcie_maxslots(device_t dev) +{ + return (PCI_SLOTMAX); +} + +static int +jh7110_pcie_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount, + device_t *pic, struct intr_irqsrc **srcs) +{ + struct jh7110_pcie_softc *sc; + int i, beg; + + sc = device_get_softc(dev); + mtx_lock(&sc->msi_mtx); + + /* Search for a requested contiguous region */ + for (beg = 0; beg + count < MSI_COUNT; ) { + for (i = beg; i < beg + count; i++) { + if (sc->isrcs[i].is_used == MSI_USED) + goto next; + } + goto found; +next: + beg = i + 1; + } + + /* Requested area not found */ + mtx_unlock(&sc->msi_mtx); + device_printf(dev, "warning: failed to allocate %d MSIs.\n", count); + + return (ENXIO); + +found: + /* Mark and allocate messages */ + for (i = 0; i < count; ++i) { + sc->isrcs[i + beg].is_used = MSI_USED; + srcs[i] = &(sc->isrcs[i + beg].isrc); + } + + mtx_unlock(&sc->msi_mtx); + *pic = 
device_get_parent(dev); + + return (0); +} + +static int +jh7110_pcie_alloc_msi(device_t pci, device_t child, int count, + int maxcount, int *irqs) +{ + phandle_t msi_parent; + int err; + + msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); + err = intr_alloc_msi(pci, child, msi_parent, count, maxcount, irqs); + + return (err); +} + +static int +jh7110_pcie_release_msi(device_t pci, device_t child, int count, int *irqs) +{ + phandle_t msi_parent; + int err; + + msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); + err = intr_release_msi(pci, child, msi_parent, count, irqs); + + return (err); +} + +static int +jh7110_pcie_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc, + uint64_t *addr, uint32_t *data) +{ + struct jh7110_pcie_irqsrc *jhirq = (struct jh7110_pcie_irqsrc *)isrc; + + *addr = IRQ_MSI_BASE; + *data = jhirq->irq; + + return (0); +} + + +static int +jh7110_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr, + uint32_t *data) +{ + phandle_t msi_parent; + int err; + + msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); + + err = intr_map_msi(pci, child, msi_parent, irq, addr, data); + if (err != 0) { + device_printf(pci, "intr_map_msi() failed\n"); + return (err); + } + + return (err); +} + +static int +jh7110_pcie_alloc_msix(device_t pci, device_t child, int *irq) +{ + return (jh7110_pcie_alloc_msi(pci, child, 1, 32, irq)); +} + +static int +jh7110_pcie_release_msix(device_t pci, device_t child, int irq) +{ + phandle_t msi_parent; + int err; + + msi_parent = OF_xref_from_node(ofw_bus_get_node(pci)); + err = intr_release_msix(pci, child, msi_parent, irq); + + return (err); +} + +static int +jh7110_pcie_msi_alloc_msix(device_t dev, device_t child, device_t *pic, + struct intr_irqsrc **isrcp) +{ + return (jh7110_pcie_msi_alloc_msi(dev, child, 1, 32, pic, isrcp)); +} + +static int +jh7110_pcie_msi_release_msi(device_t dev, device_t child, int count, + struct intr_irqsrc **isrc) +{ + struct jh7110_pcie_softc *sc; + 
struct jh7110_pcie_irqsrc *irq; + int i; + + sc = device_get_softc(dev); + mtx_lock(&sc->msi_mtx); + + for (i = 0; i < count; i++) { + irq = (struct jh7110_pcie_irqsrc *)isrc[i]; + + KASSERT((irq->is_used & MSI_USED) == MSI_USED, + ("%s: Trying to release an unused MSI(-X) interrupt", + __func__)); + + irq->is_used = 0; + } + + mtx_unlock(&sc->msi_mtx); + return (0); +} + +static int +jh7110_pcie_msi_release_msix(device_t dev, device_t child, + struct intr_irqsrc *isrc) +{ + return (jh7110_pcie_msi_release_msi(dev, child, 1, &isrc)); +} + +static void +jh7110_pcie_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask) +{ + struct jh7110_pcie_softc *sc; + struct jh7110_pcie_irqsrc *jhirq = (struct jh7110_pcie_irqsrc *)isrc; + uint32_t reg, irq; + + sc = device_get_softc(dev); + irq = jhirq->irq; + + reg = bus_read_4(sc->cfg_mem_res, sc->msi_mask_offset); + if (mask != 0) + reg &= ~(1U << irq); + else + reg |= (1U << irq); + bus_write_4(sc->cfg_mem_res, sc->msi_mask_offset, reg); +} + +static void +jh7110_pcie_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc) +{ + jh7110_pcie_msi_mask(dev, isrc, true); +} + +static void +jh7110_pcie_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc) +{ + jh7110_pcie_msi_mask(dev, isrc, false); +} + +static void +jh7110_pcie_msi_post_filter(device_t dev, struct intr_irqsrc *isrc) +{ +} + +static void +jh7110_pcie_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc) +{ +} + +static void +jh7110_pcie_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc) +{ + struct jh7110_pcie_softc *sc; + struct jh7110_pcie_irqsrc *jhirq = (struct jh7110_pcie_irqsrc *)isrc; + uint32_t irq; + + sc = device_get_softc(dev); + irq = jhirq->irq; + + /* MSI bottom ack */ + WR4(sc, IRQ_MSI_STATUS, (1U << irq)); +} + +static int +jh7110_pcie_decode_ranges(struct jh7110_pcie_softc *sc, + struct ofw_pci_range *ranges, int nranges) +{ + int i; + + for (i = 0; i < nranges; i++) { + if (((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) == + 
OFW_PCI_PHYS_HI_SPACE_MEM64)) { + if (sc->range_mem64.size != 0) { + device_printf(sc->dev, + "Duplicate range mem64 found in DT\n"); + return (ENXIO); + } + sc->range_mem64 = ranges[i]; + } else if (((ranges[i].pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) == + OFW_PCI_PHYS_HI_SPACE_MEM32)) { + if (sc->range_mem32.size != 0) { + device_printf(sc->dev, + "Duplicated range mem32 found in DT\n"); + return (ENXIO); + } + sc->range_mem32 = ranges[i]; + } + } + return (0); +} + +static int +jh7110_pcie_probe(device_t dev) +{ + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) + return (ENXIO); + + device_set_desc(dev, "Starfive JH7110 PCIe controller"); + + return (BUS_PROBE_DEFAULT); +} + +static void +jh7110_pcie_set_atr(device_t dev, uint64_t axi_begin, uint64_t pci_begin, + uint64_t win_size, uint32_t win_idx) +{ + struct jh7110_pcie_softc *sc; + uint32_t val, taddr_size; + + sc = device_get_softc(dev); + + if (win_idx == 0) + val = PCIE_CONF_INTERFACE; + else + val = PCIE_TXRX_INTERFACE; + + WR4(sc, ATR0_AXI4_SLV0_TRSL_PARAM + win_idx * ATR_ENTRY_SIZE, val); + + taddr_size = ilog2(win_size) - 1; + val = LOW32(axi_begin) | taddr_size << ATR0_PCIE_ATR_SIZE_SHIFT | + ATR0_ENABLE; + + WR4(sc, ATR0_AXI4_SLV0_SRCADDR_PARAM + win_idx * ATR_ENTRY_SIZE, val); + + val = HI32(axi_begin); + WR4(sc, ATR0_AXI4_SLV0_SRC_ADDR + win_idx * ATR_ENTRY_SIZE, val); + + val = LOW32(pci_begin); + WR4(sc, ATR0_AXI4_SLV0_TRSL_ADDR_LSB + win_idx * ATR_ENTRY_SIZE, val); + + val = HI32(pci_begin); + WR4(sc, ATR0_AXI4_SLV0_TRSL_ADDR_UDW + win_idx * ATR_ENTRY_SIZE, val); + + val = RD4(sc, ATR0_PCIE_WIN0_SRCADDR_PARAM); + val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT); + + WR4(sc, ATR0_PCIE_WIN0_SRCADDR_PARAM, val); + WR4(sc, ATR0_PCIE_WIN0_SRC_ADDR, 0); +} + +static int +jh7110_pcie_parse_fdt_resources(struct jh7110_pcie_softc *sc) +{ + uint32_t val; + int err; + + /* Getting clocks */ + if (clk_get_by_ofw_name(sc->dev, 0, "noc", 
&sc->clk_noc) != 0) { + device_printf(sc->dev, "could not get noc clock\n"); + sc->clk_noc = NULL; + return (ENXIO); + } + if (clk_get_by_ofw_name(sc->dev, 0, "tl", &sc->clk_tl) != 0) { + device_printf(sc->dev, "could not get tl clock\n"); + sc->clk_tl = NULL; + return (ENXIO); + } + if (clk_get_by_ofw_name(sc->dev, 0, "axi_mst0", &sc->clk_axi) != 0) { + device_printf(sc->dev, "could not get axi_mst0 clock\n"); + sc->clk_axi = NULL; + return (ENXIO); + } + if (clk_get_by_ofw_name(sc->dev, 0, "apb", &sc->clk_apb) != 0) { + device_printf(sc->dev, "could not get apb clock\n"); + sc->clk_apb = NULL; + return (ENXIO); + } + + /* Getting resets */ + err = hwreset_get_by_ofw_name(sc->dev, 0, "mst0", &sc->rst_mst0); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_mst0' reset\n"); + return (ENXIO); + } + err = hwreset_get_by_ofw_name(sc->dev, 0, "slv0", &sc->rst_slv0); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_slv0' reset\n"); + return (ENXIO); + } + err = hwreset_get_by_ofw_name(sc->dev, 0, "slv", &sc->rst_slv); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_slv' reset\n"); + return (ENXIO); + } + err = hwreset_get_by_ofw_name(sc->dev, 0, "brg", &sc->rst_brg); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_brg' reset\n"); + return (ENXIO); + } + err = hwreset_get_by_ofw_name(sc->dev, 0, "core", &sc->rst_core); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_core' reset\n"); + return (ENXIO); + } + err = hwreset_get_by_ofw_name(sc->dev, 0, "apb", &sc->rst_apb); + if (err != 0) { + device_printf(sc->dev, "cannot get 'rst_apb' reset\n"); + return (ENXIO); + } + + /* Getting PCI endpoint reset pin */ + err = gpio_pin_get_by_ofw_property(sc->dev, sc->node, "perst-gpios", + &sc->perst_pin); + if (err != 0) { + device_printf(sc->dev, "Cannot get perst-gpios\n"); + return (ENXIO); + } + + /* Getting syscon property */ + if (syscon_get_by_ofw_property(sc->dev, sc->node, "starfive,stg-syscon", + &sc->stg_syscon) != 
0) { + device_printf(sc->dev, "Cannot get starfive,stg-syscon\n"); + return (ENXIO); + } + + /* Assigning syscon base address and MSI mask offset */ + err = OF_getencprop(sc->node, "linux,pci-domain", &val, sizeof(val)); + if (err == -1) { + device_printf(sc->dev, + "Couldn't get pci-domain property, error: %d\n", err); + return (ENXIO); + } + + if (val == 0) { + sc->stg_baddr = STG_PCIE0_BASE; + sc->msi_mask_offset = MSI_PCIE0_MASK_OFFSET; + } else if (val == 1) { + sc->stg_baddr = STG_PCIE1_BASE; + sc->msi_mask_offset = MSI_PCIE1_MASK_OFFSET; + } else { + device_printf(sc->dev, "Error: an invalid pci-domain value\n"); + return (ENXIO); + } + + return (0); +} + +static void +jh7110_pcie_release_resources(device_t dev) +{ + struct jh7110_pcie_softc *sc; + + sc = device_get_softc(dev); + + if (sc->irq_res != NULL) + bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie); + if (sc->irq_res != NULL) + bus_free_resource(dev, SYS_RES_IRQ, sc->irq_res); + if (sc->reg_mem_res != NULL) + bus_free_resource(dev, SYS_RES_MEMORY, sc->reg_mem_res); + if (sc->cfg_mem_res != NULL) + bus_free_resource(dev, SYS_RES_MEMORY, sc->cfg_mem_res); + + if (sc->clk_noc != NULL) + clk_release(sc->clk_noc); + if (sc->clk_tl != NULL) + clk_release(sc->clk_tl); + if (sc->clk_axi != NULL) + clk_release(sc->clk_axi); + if (sc->clk_apb != NULL) + clk_release(sc->clk_apb); + + gpio_pin_release(sc->perst_pin); + + hwreset_release(sc->rst_mst0); + hwreset_release(sc->rst_slv0); + hwreset_release(sc->rst_slv); + hwreset_release(sc->rst_brg); + hwreset_release(sc->rst_core); + hwreset_release(sc->rst_apb); + + mtx_destroy(&sc->msi_mtx); +} + +static int +jh7110_pcie_detach(device_t dev) +{ + ofw_pcib_fini(dev); + jh7110_pcie_release_resources(dev); + + return (0); +} + +static int +jh7110_pcie_attach(device_t dev) +{ + struct jh7110_pcie_softc *sc; + phandle_t xref; + uint32_t val; + int i, err, rid, irq, win_idx = 0; + char name[INTR_ISRC_NAMELEN]; + + sc = device_get_softc(dev); + sc->dev = dev; + 
sc->node = ofw_bus_get_node(dev); + + sc->irq_res = NULL; + sc->reg_mem_res = NULL; + sc->cfg_mem_res = NULL; + sc->clk_noc = NULL; + sc->clk_tl = NULL; + sc->clk_axi = NULL; + sc->clk_apb = NULL; + + mtx_init(&sc->msi_mtx, "jh7110_pcie, msi_mtx", NULL, MTX_DEF); + + /* Allocating memory */ + err = ofw_bus_find_string_index(sc->node, "reg-names", "apb", &rid); + if (err != 0) { + device_printf(dev, "Cannot get apb memory\n"); + goto out; + } + + sc->reg_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (sc->reg_mem_res == NULL) { + device_printf(dev, "Cannot allocate apb memory\n"); + err = ENXIO; + goto out; + } + + err = ofw_bus_find_string_index(sc->node, "reg-names", "cfg", &rid); + if (err != 0) { + device_printf(dev, "Cannot get cfg memory\n"); + err = ENXIO; + goto out; + } + + sc->cfg_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); + if (sc->cfg_mem_res == NULL) { + device_printf(dev, "Cannot allocate cfg memory\n"); + err = ENXIO; + goto out; + } + + /* Getting device tree properties */ + if (jh7110_pcie_parse_fdt_resources(sc) != 0) + goto out; + + /* Clearing interrupts, enabling MSI */ + WR4(sc, IRQ_LOCAL_STATUS, 0xffffffff); + WR4(sc, IRQ_LOCAL_MASK, INTX_MASK | ERROR_MASK | MSI_MASK); + + /* Setting host up */ + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_RP_NEP_OFFSET, + STG_K_RP_NEP, STG_K_RP_NEP); + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AW_OFFSET, + STG_CKREF_MASK, STG_CKREF_VAL); + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AW_OFFSET, + STG_CLKREQ, STG_CLKREQ); + + /* Enabling clocks */ + if (clk_enable(sc->clk_noc) != 0) { + device_printf(dev, "could not enable noc clock\n"); + goto out; + } + if (clk_enable(sc->clk_tl) != 0) { + device_printf(dev, "could not enable tl clock\n"); + goto out; + } + if (clk_enable(sc->clk_axi) != 0) { + device_printf(dev, "could not enable axi_mst0 clock\n"); + goto out; + } + if (clk_enable(sc->clk_apb) != 0) { + 
device_printf(dev, "could not enable apb clock\n"); + goto out; + } + + /* Deasserting resets */ + err = hwreset_deassert(sc->rst_mst0); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'mst0' reset\n"); + goto out; + } + err = hwreset_deassert(sc->rst_slv0); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'slv0' reset\n"); + goto out; + } + err = hwreset_deassert(sc->rst_slv); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'slv' reset\n"); + goto out; + } + err = hwreset_deassert(sc->rst_brg); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'brg' reset\n"); + goto out; + } + err = hwreset_deassert(sc->rst_core); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'core' reset\n"); + goto out; + } + err = hwreset_deassert(sc->rst_apb); + if (err != 0) { + device_printf(sc->dev, "cannot deassert 'apb' reset\n"); + goto out; + } + + err = gpio_pin_set_active(sc->perst_pin, true); + if (err != 0) { + device_printf(dev, "Cannot activate gpio pin, error %d\n", err); + goto out; + } + + /* Switching off PHY functions 1-3 */ + for (i = 1; i != 4; i++) { + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AR_OFFSET, + STG_AXI4_SLVL_AR_MASK, (i << PHY_FUNC_SHIFT) + << STG_AXI4_SLVL_ARFUNC_SHIFT); + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AW_OFFSET, + STG_AXI4_SLVL_AW_MASK, i << PHY_FUNC_SHIFT); + + val = RD4(sc, PCI_MISC_REG); + WR4(sc, PCI_MISC_REG, val | PHY_FUNC_DIS); + } + + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AR_OFFSET, + STG_AXI4_SLVL_AR_MASK, 0); + SYSCON_MODIFY_4(sc->stg_syscon, sc->stg_baddr + STG_AW_OFFSET, + STG_AXI4_SLVL_AW_MASK, 0); + + /* Enabling root port */ + val = RD4(sc, PCI_GENERAL_SETUP_REG); + WR4(sc, PCI_GENERAL_SETUP_REG, val | ROOTPORT_ENABLE); + + /* Zeroing RC BAR */ + WR4(sc, PCI_CONF_SPACE_REGS + PCIR_BAR(0), 0); + WR4(sc, PCI_CONF_SPACE_REGS + PCIR_BAR(1), 0); + + /* Setting standard class */ + val = RD4(sc, PCIE_PCI_IDS_REG); + val &= REV_ID_MASK; + val |= 
(PCI_CLASS_BRIDGE_PCI << PCI_IDS_CLASS_CODE_SHIFT); + WR4(sc, PCIE_PCI_IDS_REG, val); + + /* Disabling latency tolerance reporting */ + val = RD4(sc, PMSG_RX_SUPPORT_REG); + WR4(sc, PMSG_RX_SUPPORT_REG, val & ~PMSG_LTR_SUPPORT); + + /* Setting support for 64-bit pref window */ + val = RD4(sc, PCIE_WINCONF); + WR4(sc, PCIE_WINCONF, val | PREF_MEM_WIN_64_SUPPORT); + + /* Holding PCI endpoint reset (perst) for 100ms, setting the pin */ + DELAY(100); + err = gpio_pin_set_active(sc->perst_pin, false); + if (err != 0) { + device_printf(dev, "Cannot deassert perst pin: %d\n", err); + goto out; + } + + /* Setting up an address translation window */ + jh7110_pcie_set_atr(dev, rman_get_start(sc->cfg_mem_res), 0, + rman_get_size(sc->cfg_mem_res), win_idx); + + err = ofw_pcib_init(dev); + if (err != 0) { + device_printf(dev, "ofw_pcib_init() fails\n"); + goto out; + } + + jh7110_pcie_decode_ranges(sc, sc->ofw_pci.sc_range, + sc->ofw_pci.sc_nrange); + + jh7110_pcie_set_atr(dev, sc->range_mem32.pci, sc->range_mem32.pci, + sc->range_mem32.size, ++win_idx); + jh7110_pcie_set_atr(dev, sc->range_mem64.pci, sc->range_mem64.pci, + sc->range_mem64.size, ++win_idx); + + /* Checking data link status */ + for (i = 0; i != 1000; i++) { + val = SYSCON_READ_4(sc->stg_syscon, + sc->stg_baddr + STG_LNKSTA_OFFSET); + if ((val & STG_LINK_UP) != 0) { + device_printf(dev, "Link up\n"); + break; + } + DELAY(100); + } + if ((val & STG_LINK_UP) == 0) { + device_printf(dev, "Cannot establish data link\n"); + goto out; + } + + /* Setup interrupts */ + rid = 0; + sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); + if (sc->irq_res == NULL) { + device_printf(dev, "Cannot allocate IRQ resource\n"); + err = ENXIO; + goto out_full; + } + + err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_BIO | INTR_MPSAFE, + jh7110_pcie_intr, NULL, sc, &sc->irq_cookie); + if (err != 0) { + device_printf(dev, "Cannot setup interrupt handler\n"); + err = ENXIO; + goto out_full; + } + + sc->isrcs = 
malloc(sizeof(*sc->isrcs) * MSI_COUNT, M_DEVBUF, + M_WAITOK | M_ZERO); + + snprintf(name, INTR_ISRC_NAMELEN, "%s, MSI", + device_get_nameunit(sc->dev)); + + for (irq = 0; irq < MSI_COUNT; irq++) { + sc->isrcs[irq].irq = irq; + err = intr_isrc_register(&sc->isrcs[irq].isrc, sc->dev, 0, + "%s,%u", name, irq); + if (err != 0) { + device_printf(dev, + "intr_isrs_register failed for MSI irq %d\n", irq); + goto out_full; + } + } + + xref = OF_xref_from_node(sc->node); + OF_device_register_xref(xref, dev); + + err = intr_msi_register(dev, xref); + if (err != 0) { + device_printf(dev, "intr_msi_register() fails\n"); + goto out_full; + } + + device_add_child(dev, "pci", DEVICE_UNIT_ANY); + bus_attach_children(dev); + + return (0); + +out_full: + ofw_pcib_fini(dev); +out: + jh7110_pcie_release_resources(dev); + + return (err); +} + +static device_method_t jh7110_pcie_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, jh7110_pcie_probe), + DEVMETHOD(device_attach, jh7110_pcie_attach), + DEVMETHOD(device_detach, jh7110_pcie_detach), + + /* pcib interface */ + DEVMETHOD(pcib_maxslots, jh7110_pcie_maxslots), + DEVMETHOD(pcib_read_config, jh7110_pcie_read_config), + DEVMETHOD(pcib_write_config, jh7110_pcie_write_config), + DEVMETHOD(pcib_route_interrupt, jh7110_pcie_route_interrupt), + DEVMETHOD(pcib_map_msi, jh7110_pcie_map_msi), + DEVMETHOD(pcib_alloc_msi, jh7110_pcie_alloc_msi), + DEVMETHOD(pcib_release_msi, jh7110_pcie_release_msi), + DEVMETHOD(pcib_alloc_msix, jh7110_pcie_alloc_msix), + DEVMETHOD(pcib_release_msix, jh7110_pcie_release_msix), + DEVMETHOD(pcib_request_feature, pcib_request_feature_allow), + + /* MSI/MSI-X */ + DEVMETHOD(msi_alloc_msi, jh7110_pcie_msi_alloc_msi), + DEVMETHOD(msi_alloc_msix, jh7110_pcie_msi_alloc_msix), + DEVMETHOD(msi_release_msi, jh7110_pcie_msi_release_msi), + DEVMETHOD(msi_release_msix, jh7110_pcie_msi_release_msix), + DEVMETHOD(msi_map_msi, jh7110_pcie_msi_map_msi), + + /* Interrupt controller interface */ + 
DEVMETHOD(pic_enable_intr, jh7110_pcie_msi_enable_intr), + DEVMETHOD(pic_disable_intr, jh7110_pcie_msi_disable_intr), + DEVMETHOD(pic_post_filter, jh7110_pcie_msi_post_filter), + DEVMETHOD(pic_post_ithread, jh7110_pcie_msi_post_ithread), + DEVMETHOD(pic_pre_ithread, jh7110_pcie_msi_pre_ithread), + + /* OFW bus interface */ + DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), + DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), + DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), + DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), + DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), + + DEVMETHOD_END +}; + +DEFINE_CLASS_1(pcib, jh7110_pcie_driver, jh7110_pcie_methods, + sizeof(struct jh7110_pcie_softc), ofw_pcib_driver); +DRIVER_MODULE(jh7110_pcie, simplebus, jh7110_pcie_driver, NULL, NULL); diff --git a/sys/riscv/vmm/riscv.h b/sys/riscv/vmm/riscv.h index 870d0d6c5cd1..917a333520ed 100644 --- a/sys/riscv/vmm/riscv.h +++ b/sys/riscv/vmm/riscv.h @@ -122,29 +122,6 @@ struct hyptrap { uint64_t htinst; }; -#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args) \ - ret_type vmmops_##opname args; - -DEFINE_VMMOPS_IFUNC(int, modinit, (void)) -DEFINE_VMMOPS_IFUNC(int, modcleanup, (void)) -DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap)) -DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging, - uint64_t gla, int prot, uint64_t *gpa, int *is_fault)) -DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap, - struct vm_eventinfo *info)) -DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi)) -DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu, - int vcpu_id)) -DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui)) -DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t scause)) -DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval)) -DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val)) -DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval)) 
-DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val)) -DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min, - vm_offset_t max)) -DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace)) - #define dprintf(fmt, ...) struct hypctx *riscv_get_active_vcpu(void); diff --git a/sys/riscv/vmm/vmm.c b/sys/riscv/vmm/vmm.c index 7528ef6e4698..4c9b1fa53f7a 100644 --- a/sys/riscv/vmm/vmm.c +++ b/sys/riscv/vmm/vmm.c @@ -92,7 +92,6 @@ struct vcpu { struct fpreg *guestfpu; /* (a,i) guest fpu state */ }; -#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx)) #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN) #define vcpu_lock_destroy(v) mtx_destroy(&((v)->mtx)) #define vcpu_lock(v) mtx_lock_spin(&((v)->mtx)) @@ -121,7 +120,6 @@ struct vm { bool dying; /* (o) is dying */ volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */ volatile cpuset_t halted_cpus; /* (x) cpus in a hard halt */ - struct vmspace *vmspace; /* (o) guest's address space */ struct vm_mem mem; /* (i) [m+v] guest memory */ char name[VM_MAX_NAMELEN]; /* (o) virtual machine name */ struct vcpu **vcpu; /* (i) guest vcpus */ @@ -174,6 +172,7 @@ vcpu_cleanup(struct vcpu *vcpu, bool destroy) vmm_stat_free(vcpu->stats); fpu_save_area_free(vcpu->guestfpu); vcpu_lock_destroy(vcpu); + free(vcpu, M_VMM); } } @@ -285,7 +284,7 @@ vm_init(struct vm *vm, bool create) { int i; - vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace)); + vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm))); MPASS(vm->cookie != NULL); CPU_ZERO(&vm->active_cpus); @@ -347,9 +346,9 @@ vm_alloc_vcpu(struct vm *vm, int vcpuid) } void -vm_slock_vcpus(struct vm *vm) +vm_lock_vcpus(struct vm *vm) { - sx_slock(&vm->vcpus_init_lock); + sx_xlock(&vm->vcpus_init_lock); } void @@ -362,7 +361,7 @@ int vm_create(const char *name, struct vm **retvm) { struct vm *vm; - struct vmspace *vmspace; + int error; /* * If vmm.ko could not be successfully initialized then don't attempt @@ 
-374,14 +373,13 @@ vm_create(const char *name, struct vm **retvm) if (name == NULL || strlen(name) >= VM_MAX_NAMELEN) return (EINVAL); - vmspace = vmmops_vmspace_alloc(0, 1ul << 39); - if (vmspace == NULL) - return (ENOMEM); - vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO); + error = vm_mem_init(&vm->mem, 0, 1ul << 39); + if (error != 0) { + free(vm, M_VMM); + return (error); + } strcpy(vm->name, name); - vm->vmspace = vmspace; - vm_mem_init(&vm->mem); sx_init(&vm->vcpus_init_lock, "vm vcpus"); vm->sockets = 1; @@ -450,11 +448,6 @@ vm_cleanup(struct vm *vm, bool destroy) if (destroy) { vm_mem_destroy(vm); - vmmops_vmspace_free(vm->vmspace); - vm->vmspace = NULL; - - for (i = 0; i < vm->maxcpus; i++) - free(vm->vcpu[i], M_VMM); free(vm->vcpu, M_VMM); sx_destroy(&vm->vcpus_init_lock); } @@ -760,12 +753,6 @@ vcpu_notify_event(struct vcpu *vcpu) vcpu_unlock(vcpu); } -struct vmspace * -vm_vmspace(struct vm *vm) -{ - return (vm->vmspace); -} - struct vm_mem * vm_mem(struct vm *vm) { @@ -1036,10 +1023,14 @@ vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot, static int vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu) { + struct vm *vm; + vm = vcpu->vm; vcpu_lock(vcpu); - while (1) { + if (vm->suspend) + break; + if (aplic_check_pending(vcpu->cookie)) break; @@ -1080,7 +1071,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu) vm = vcpu->vm; vme = &vcpu->exitinfo; - pmap = vmspace_pmap(vm->vmspace); + pmap = vmspace_pmap(vm_vmspace(vm)); addr = (vme->htval << 2) & ~(PAGE_SIZE - 1); dprintf("%s: %lx\n", __func__, addr); @@ -1103,7 +1094,7 @@ vm_handle_paging(struct vcpu *vcpu, bool *retu) if (pmap_fault(pmap, addr, ftype)) return (0); - map = &vm->vmspace->vm_map; + map = &vm_vmspace(vm)->vm_map; rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL); if (rv != KERN_SUCCESS) { printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n", @@ -1185,7 +1176,7 @@ vm_run(struct vcpu *vcpu) if (CPU_ISSET(vcpuid, 
&vm->suspended_cpus)) return (EINVAL); - pmap = vmspace_pmap(vm->vmspace); + pmap = vmspace_pmap(vm_vmspace(vm)); vme = &vcpu->exitinfo; evinfo.rptr = NULL; evinfo.sptr = &vm->suspend; |