Diffstat (limited to 'sys/arm64')
30 files changed, 489 insertions, 128 deletions
diff --git a/sys/arm64/apple/apple_pinctrl.c b/sys/arm64/apple/apple_pinctrl.c
index ebaaccea1d99..c28b1c62d78c 100644
--- a/sys/arm64/apple/apple_pinctrl.c
+++ b/sys/arm64/apple/apple_pinctrl.c
@@ -171,12 +171,13 @@ apple_pinctrl_attach(device_t dev)
 		    OF_xref_from_node(ofw_bus_get_node(dev)));
 	}
 
-	sc->sc_busdev = gpiobus_attach_bus(dev);
+	sc->sc_busdev = gpiobus_add_bus(dev);
 	if (sc->sc_busdev == NULL) {
 		device_printf(dev, "failed to attach gpiobus\n");
 		goto error;
 	}
 
+	bus_attach_children(dev);
 	return (0);
 error:
 	mtx_destroy(&sc->sc_mtx);
diff --git a/sys/arm64/arm64/cpu_feat.c b/sys/arm64/arm64/cpu_feat.c
index cc262394913d..94114d47f846 100644
--- a/sys/arm64/arm64/cpu_feat.c
+++ b/sys/arm64/arm64/cpu_feat.c
@@ -32,16 +32,21 @@
 #include <machine/cpu.h>
 #include <machine/cpu_feat.h>
 
+SYSCTL_NODE(_hw, OID_AUTO, feat, CTLFLAG_RD, 0, "CPU features/errata");
+
 /* TODO: Make this a list if we ever grow a callback other than smccc_errata */
 static cpu_feat_errata_check_fn cpu_feat_check_cb = NULL;
 
 void
 enable_cpu_feat(uint32_t stage)
 {
+	char tunable[32];
 	struct cpu_feat **featp, *feat;
 	uint32_t midr;
 	u_int errata_count, *errata_list;
 	cpu_feat_errata errata_status;
+	cpu_feat_en check_status;
+	bool val;
 
 	MPASS((stage & ~CPU_FEAT_STAGE_MASK) == 0);
 
@@ -49,6 +54,21 @@ enable_cpu_feat(uint32_t stage)
 	SET_FOREACH(featp, cpu_feat_set) {
 		feat = *featp;
 
+		/* Read any tunable the user may have set */
+		if (stage == CPU_FEAT_EARLY_BOOT && PCPU_GET(cpuid) == 0) {
+			snprintf(tunable, sizeof(tunable), "hw.feat.%s",
+			    feat->feat_name);
+			if (TUNABLE_BOOL_FETCH(tunable, &val)) {
+				if (val) {
+					feat->feat_flags |=
+					    CPU_FEAT_USER_ENABLED;
+				} else {
+					feat->feat_flags |=
+					    CPU_FEAT_USER_DISABLED;
+				}
+			}
+		}
+
 		/* Run the enablement code at the correct stage of boot */
 		if ((feat->feat_flags & CPU_FEAT_STAGE_MASK) != stage)
 			continue;
@@ -58,8 +78,26 @@ enable_cpu_feat(uint32_t stage)
 		    PCPU_GET(cpuid) != 0)
 			continue;
 
-		if (feat->feat_check != NULL && !feat->feat_check(feat, midr))
-			continue;
+		if (feat->feat_check != NULL) {
+			check_status = feat->feat_check(feat, midr);
+		} else {
+			check_status = FEAT_DEFAULT_ENABLE;
+		}
+
+		/* Ignore features that are not present */
+		if (check_status == FEAT_ALWAYS_DISABLE)
+			goto next;
+
+		/* The user disabled the feature */
+		if ((feat->feat_flags & CPU_FEAT_USER_DISABLED) != 0)
+			goto next;
+
+		/*
+		 * If the feature was disabled by default and the user
+		 * didn't enable it, then skip it.
+		 */
+		if (check_status == FEAT_DEFAULT_DISABLE &&
+		    (feat->feat_flags & CPU_FEAT_USER_ENABLED) == 0)
+			goto next;
 
 		/*
 		 * Check if the feature has any errata that may need a
@@ -97,8 +135,13 @@ enable_cpu_feat(uint32_t stage)
 		/* Shouldn't be possible */
 		MPASS(errata_status != ERRATA_UNKNOWN);
 
-		feat->feat_enable(feat, errata_status, errata_list,
-		    errata_count);
+		if (feat->feat_enable(feat, errata_status, errata_list,
+		    errata_count))
+			feat->feat_enabled = true;
+
+next:
+		if (!feat->feat_enabled && feat->feat_disabled != NULL)
+			feat->feat_disabled(feat);
 	}
 }
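The new flow above gives every feature a boot-time tunable (hw.feat.<name>) and a read-only sysctl reporting whether it was enabled. A minimal sketch of a feature written against the new interface; the feat_foo names here are illustrative, not from this change:

static cpu_feat_en
feat_foo_check(const struct cpu_feat *feat __unused, u_int midr __unused)
{
	/* Probe ID registers here; return FEAT_ALWAYS_DISABLE if absent. */
	return (FEAT_DEFAULT_ENABLE);
}

static bool
feat_foo_enable(const struct cpu_feat *feat __unused,
    cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
    u_int errata_count __unused)
{
	/* Returning true marks the feature enabled in hw.feat.feat_foo. */
	return (true);
}

/* Registers in cpu_feat_set and honours the hw.feat.feat_foo tunable. */
CPU_FEAT(feat_foo, "Example feature",
    feat_foo_check, NULL, feat_foo_enable, NULL,
    CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);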
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0f46e44f5d6a..0301eb91c9ef 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -241,6 +241,7 @@ fail:
 int
 efi_arch_enter(void)
 {
+	uint64_t tcr;
 
 	CRITICAL_ASSERT(curthread);
 	curthread->td_md.md_efirt_dis_pf = vm_fault_disable_pagefaults();
@@ -249,7 +250,17 @@ efi_arch_enter(void)
 	 * Temporarily switch to EFI's page table. However, we leave curpmap
 	 * unchanged in order to prevent its ASID from being reclaimed before
 	 * we switch back to its page table in efi_arch_leave().
+	 *
+	 * UEFI doesn't care about TBI, so enable it. It's more likely
+	 * userspace will have TBI on as it's only disabled for backwards
+	 * compatibility.
 	 */
+	tcr = READ_SPECIALREG(tcr_el1);
+	if ((tcr & MD_TCR_FIELDS) != TCR_TBI0) {
+		tcr &= ~MD_TCR_FIELDS;
+		tcr |= TCR_TBI0;
+		WRITE_SPECIALREG(tcr_el1, tcr);
+	}
 	set_ttbr0(efi_ttbr0);
 	if (PCPU_GET(bcast_tlbi_workaround) != 0)
 		invalidate_local_icache();
@@ -260,6 +271,7 @@ efi_arch_enter(void)
 void
 efi_arch_leave(void)
 {
+	uint64_t proc_tcr, tcr;
 
 	/*
 	 * Restore the pcpu pointer. Some UEFI implementations trash it and
@@ -271,6 +283,13 @@ efi_arch_leave(void)
 	__asm __volatile(
 	    "mrs x18, tpidr_el1 \n"
 	);
+	proc_tcr = curthread->td_proc->p_md.md_tcr;
+	tcr = READ_SPECIALREG(tcr_el1);
+	if ((tcr & MD_TCR_FIELDS) != proc_tcr) {
+		tcr &= ~MD_TCR_FIELDS;
+		tcr |= proc_tcr;
+		WRITE_SPECIALREG(tcr_el1, tcr);
+	}
 	set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
 	if (PCPU_GET(bcast_tlbi_workaround) != 0)
 		invalidate_local_icache();
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
index 7cd5327b9f1b..8f8a934ad520 100644
--- a/sys/arm64/arm64/elf32_machdep.c
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -195,7 +195,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
 	register_t *ap;
 	struct syscall_args *sa;
 	int error, i, nap, narg;
-	unsigned int args[4];
+	unsigned int args[6];
 
 	nap = 4;
 	p = td->td_proc;
@@ -225,7 +225,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
 			sa->args[i] = ap[i];
 		if (narg > nap) {
 			if (narg - nap > nitems(args))
-				panic("Too many system call arguiments");
+				panic("Too many system call arguments");
 			error = copyin((void *)td->td_frame->tf_x[13], args,
 			    (narg - nap) * sizeof(int));
 			if (error != 0)
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index 970dba0ca7d9..13af5c5065d6 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -65,7 +65,13 @@ u_long __read_frequently linux_elf_hwcap2;
 u_long __read_frequently linux_elf_hwcap3;
 u_long __read_frequently linux_elf_hwcap4;
 
-struct arm64_addr_mask elf64_addr_mask;
+struct arm64_addr_mask elf64_addr_mask = {
+	.code = TBI_ADDR_MASK,
+	.data = TBI_ADDR_MASK,
+};
+#ifdef COMPAT_FREEBSD14
+struct arm64_addr_mask elf64_addr_mask_14;
+#endif
 
 static void arm64_exec_protect(struct image_params *, int);
 
@@ -136,7 +142,14 @@ get_arm64_addr_mask(struct regset *rs, struct thread *td, void *buf,
 	if (buf != NULL) {
 		KASSERT(*sizep == sizeof(elf64_addr_mask),
 		    ("%s: invalid size", __func__));
+#ifdef COMPAT_FREEBSD14
+		/* If running an old binary, use the old address mask */
+		if (td->td_proc->p_osrel < TBI_VERSION)
+			memcpy(buf, &elf64_addr_mask_14,
+			    sizeof(elf64_addr_mask_14));
+		else
+#endif
 		memcpy(buf, &elf64_addr_mask, sizeof(elf64_addr_mask));
 	}
 
 	*sizep = sizeof(elf64_addr_mask);
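For context, TBI (Top Byte Ignore) makes the hardware ignore bits 63:56 of a data address, so a binary new enough to run with TCR_EL1.TBI0 set can carry a tag in the top byte of a pointer. A hedged userspace sketch, not part of this change:

#include <stdint.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t *p, *tagged;

	p = malloc(sizeof(*p));
	/*
	 * Store a tag in bits 63:56; with TBI0 set the store below still
	 * accesses *p rather than faulting on a non-canonical address.
	 */
	tagged = (uint64_t *)((uintptr_t)p | (0x5aUL << 56));
	*tagged = 42;
	free(p);
	return (0);
}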
diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S
index 13095def8b00..5a4181348a54 100644
--- a/sys/arm64/arm64/exception.S
+++ b/sys/arm64/arm64/exception.S
@@ -42,10 +42,9 @@
  */
 .macro save_registers_head el
 .if \el == 1
-	mov	x18, sp
-	stp	x0, x1, [sp, #(TF_X - TF_SIZE - 128)]!
+	stp	x0, x1, [sp, #-(TF_SIZE - TF_X + 128)]!
 .else
-	stp	x0, x1, [sp, #(TF_X - TF_SIZE)]!
+	stp	x0, x1, [sp, #-(TF_SIZE - TF_X)]!
 .endif
 	stp	x2, x3, [sp, #(2 * 8)]
 	stp	x4, x5, [sp, #(4 * 8)]
@@ -61,7 +60,9 @@
 	stp	x24, x25, [sp, #(24 * 8)]
 	stp	x26, x27, [sp, #(26 * 8)]
 	stp	x28, x29, [sp, #(28 * 8)]
-.if \el == 0
+.if \el == 1
+	add	x18, sp, #(TF_SIZE - TF_X + 128)
+.else
 	mrs	x18, sp_el0
 .endif
 	mrs	x10, elr_el1
diff --git a/sys/arm64/arm64/exec_machdep.c b/sys/arm64/arm64/exec_machdep.c
index 751329affd91..7c50dc93fdb4 100644
--- a/sys/arm64/arm64/exec_machdep.c
+++ b/sys/arm64/arm64/exec_machdep.c
@@ -51,6 +51,7 @@
 #include <vm/vm_map.h>
 
 #include <machine/armreg.h>
+#include <machine/elf.h>
 #include <machine/kdb.h>
 #include <machine/md_var.h>
 #include <machine/pcb.h>
@@ -411,6 +412,7 @@ exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
 {
 	struct trapframe *tf = td->td_frame;
 	struct pcb *pcb = td->td_pcb;
+	uint64_t new_tcr, tcr;
 
 	memset(tf, 0, sizeof(struct trapframe));
 
@@ -433,6 +435,35 @@ exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
 	 */
 	bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
 
+	/* If the process is new enough, enable TBI */
+	if (td->td_proc->p_osrel >= TBI_VERSION)
+		new_tcr = TCR_TBI0;
+	else
+		new_tcr = 0;
+	td->td_proc->p_md.md_tcr = new_tcr;
+
+	/* TODO: should create a pmap function for this... */
+	tcr = READ_SPECIALREG(tcr_el1);
+	if ((tcr & MD_TCR_FIELDS) != new_tcr) {
+		uint64_t asid;
+
+		tcr &= ~MD_TCR_FIELDS;
+		tcr |= new_tcr;
+		WRITE_SPECIALREG(tcr_el1, tcr);
+		isb();
+
+		/*
+		 * TCR_EL1.TBI0 is permitted to be cached in the TLB, so
+		 * we need to perform a TLB invalidation.
+		 */
+		asid = READ_SPECIALREG(ttbr0_el1) & TTBR_ASID_MASK;
+		__asm __volatile(
+		    "tlbi aside1is, %0	\n"
+		    "dsb ish		\n"
+		    "isb		\n"
+		    : : "r" (asid));
+	}
+
 	/* Generate new pointer authentication keys */
 	ptrauth_exec(td);
 }
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
index e3977798b046..22696796e69d 100644
--- a/sys/arm64/arm64/genassym.c
+++ b/sys/arm64/arm64/genassym.c
@@ -64,6 +64,7 @@ ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
 ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
 
 ASSYM(P_PID, offsetof(struct proc, p_pid));
+ASSYM(P_MD_TCR, offsetof(struct proc, p_md.md_tcr));
 
 ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
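The TODO in exec_setregs() suggests moving this into pmap; a hypothetical helper (pmap_update_user_tcr() does not exist in the tree) could encapsulate the TCR write plus the by-ASID invalidation that the comment explains is needed because TCR_EL1.TBI0 may be cached in TLB entries:

/*
 * Hypothetical sketch: apply per-process TCR_EL1 fields and invalidate
 * the current ASID, mirroring the open-coded logic in exec_setregs().
 */
static void
pmap_update_user_tcr(uint64_t new_tcr)
{
	uint64_t asid, tcr;

	tcr = READ_SPECIALREG(tcr_el1);
	if ((tcr & MD_TCR_FIELDS) == new_tcr)
		return;
	tcr &= ~MD_TCR_FIELDS;
	tcr |= new_tcr;
	WRITE_SPECIALREG(tcr_el1, tcr);
	isb();
	asid = READ_SPECIALREG(ttbr0_el1) & TTBR_ASID_MASK;
	__asm __volatile(
	    "tlbi aside1is, %0	\n"
	    "dsb ish		\n"
	    "isb		\n"
	    : : "r" (asid));
}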
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index bcacea43ad2f..2d07420bcdb0 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -232,6 +232,10 @@ static const struct cpu_parts cpu_parts_arm[] = {
 	{ CPU_PART_CORTEX_X2, "Cortex-X2" },
 	{ CPU_PART_CORTEX_X3, "Cortex-X3" },
 	{ CPU_PART_CORTEX_X4, "Cortex-X4" },
+	{ CPU_PART_C1_NANO, "C1-Nano" },
+	{ CPU_PART_C1_PRO, "C1-Pro" },
+	{ CPU_PART_C1_PREMIUM, "C1-Premium" },
+	{ CPU_PART_C1_ULTRA, "C1-Ultra" },
 	{ CPU_PART_NEOVERSE_E1, "Neoverse-E1" },
 	{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
 	{ CPU_PART_NEOVERSE_N2, "Neoverse-N2" },
@@ -2272,37 +2276,25 @@ static const struct mrs_user_reg user_regs[] = {
 static bool
 user_ctr_has_neoverse_n1_1542419(uint32_t midr, uint64_t ctr)
 {
-	/* Skip non-Neoverse-N1 */
-	if (!CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
-	    CPU_PART_NEOVERSE_N1, 0, 0))
-		return (false);
-
-	switch (CPU_VAR(midr)) {
-	default:
-		break;
-	case 4:
-		/* Fixed in r4p1 */
-		if (CPU_REV(midr) > 0)
-			break;
-		/* FALLTHROUGH */
-	case 3:
-		/* If DIC is enabled (coherent icache) then we are affected */
-		return (CTR_DIC_VAL(ctr) != 0);
-	}
-
-	return (false);
+	/*
+	 * Neoverse-N1 erratum 1542419
+	 * Present in r3p0 - r4p0
+	 * Fixed in r4p1
+	 */
+	return (midr_check_var_part_range(midr, CPU_IMPL_ARM,
+	    CPU_PART_NEOVERSE_N1, 3, 0, 4, 0) && CTR_DIC_VAL(ctr) != 0);
 }
 
-static bool
-user_ctr_check(const struct cpu_feat *feat __unused, u_int midr __unused)
+static cpu_feat_en
+user_ctr_check(const struct cpu_feat *feat __unused, u_int midr)
 {
 	if (emulate_ctr)
-		return (true);
+		return (FEAT_DEFAULT_ENABLE);
 
 	if (user_ctr_has_neoverse_n1_1542419(midr, READ_SPECIALREG(ctr_el0)))
-		return (true);
+		return (FEAT_DEFAULT_ENABLE);
 
-	return (false);
+	return (FEAT_ALWAYS_DISABLE);
 }
 
 static bool
@@ -2320,7 +2312,7 @@ user_ctr_has_errata(const struct cpu_feat *feat __unused, u_int midr,
 	return (false);
 }
 
-static void
+static bool
 user_ctr_enable(const struct cpu_feat *feat __unused,
     cpu_feat_errata errata_status, u_int *errata_list, u_int errata_count)
 {
@@ -2356,16 +2348,13 @@ user_ctr_enable(const struct cpu_feat *feat __unused,
 	WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) & ~SCTLR_UCT);
 	isb();
+
+	return (true);
 }
 
-static struct cpu_feat user_ctr = {
-	.feat_name = "Trap CTR_EL0",
-	.feat_check = user_ctr_check,
-	.feat_has_errata = user_ctr_has_errata,
-	.feat_enable = user_ctr_enable,
-	.feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, user_ctr);
+CPU_FEAT(trap_ctr, "Trap CTR_EL0",
+    user_ctr_check, user_ctr_has_errata, user_ctr_enable, NULL,
+    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
 
 static bool
 user_ctr_handler(uint64_t esr, struct trapframe *frame)
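The rewritten check relies on midr_check_var_part_range() (added to cpu.h later in this diff) to map Arm's rNpM revision naming onto (variant, revision) bounds; for instance, the call above reads as:

	bool affected;

	/*
	 * Erratum window r3p0 (var 3, rev 0) through r4p0 (var 4, rev 0)
	 * inclusive, i.e. fixed in r4p1.
	 */
	affected = midr_check_var_part_range(midr, CPU_IMPL_ARM,
	    CPU_PART_NEOVERSE_N1,
	    3, 0,	/* low bound:  r3p0 */
	    4, 0);	/* high bound: r4p0 */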
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index 4a10a2b4f2d3..50a3eda846da 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -39,6 +39,23 @@
 
 #define	VIRT_BITS	48
 
+/*
+ * Loads a 64-bit value into reg using 1 to 4 mov/movk instructions.
+ * This can be used early on when we don't know the CPU's endianness.
+ */
+.macro mov_q reg, val
+	mov	\reg, :abs_g0_nc:\val
+.if (\val >> 16) & 0xffff != 0
+	movk	\reg, :abs_g1_nc:\val
+.endif
+.if (\val >> 32) & 0xffff != 0
+	movk	\reg, :abs_g2_nc:\val
+.endif
+.if (\val >> 48) & 0xffff != 0
+	movk	\reg, :abs_g3:\val
+.endif
+.endm
+
 #if PAGE_SIZE == PAGE_SIZE_16K
 /*
  * The number of level 3 tables to create. 32 will allow for 1G of address
@@ -324,15 +341,23 @@ LENTRY(enter_kernel_el)
 	cmp	x23, #(CURRENTEL_EL_EL2)
 	b.eq	1f
 
-	ldr	x2, =SCTLR_MMU_OFF
+	/*
+	 * Ensure there are no memory operations here. If the boot loader
+	 * enters the kernel in big-endian mode then loading sctlr will
+	 * be incorrect. As instructions are the same in both endians it is
+	 * safe to use mov instructions.
+	 */
+	mov_q	x2, SCTLR_MMU_OFF
 	msr	sctlr_el1, x2
-	/* SCTLR_EOS is set so eret is a context synchronizing event so we
+	/*
+	 * SCTLR_EOS is set to make eret a context synchronizing event. We
 	 * need an isb here to ensure it's observed by later instructions,
 	 * but don't need it in the eret below.
 	 */
 	isb
-	/* Ensure SPSR_EL1 and pstate are in sync. The only wat to set the
+	/*
+	 * Ensure SPSR_EL1 and pstate are in sync. The only way to set the
 	 * latter is to set the former and return from an exception with eret.
 	 */
 	mov	x2, #(PSR_DAIF | PSR_M_EL1h)
@@ -346,11 +371,19 @@ LENTRY(enter_kernel_el)
 	 * Set just the reserved bits in sctlr_el2. This will disable the
 	 * MMU which may have broken the kernel if we enter the kernel in
 	 * EL2, e.g. when using VHE.
+	 *
+	 * As with sctlr_el1 above, use mov instructions to ensure there are
+	 * no memory operations.
 	 */
-	ldr	x2, =(SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
+	mov_q	x2, (SCTLR_EL2_RES1 | SCTLR_EL2_EIS | SCTLR_EL2_EOS)
 	msr	sctlr_el2, x2
 	isb
 
+	/*
+	 * The hardware is now in little-endian mode so memory operations
+	 * are safe.
+	 */
+
 	/* Configure the Hypervisor */
 	ldr	x2, =(HCR_RW | HCR_APK | HCR_API)
 	msr	hcr_el2, x2
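A C model of which instructions the mov_q macro above would emit, useful for checking the constant-splitting logic (illustrative only, not from the commit):

#include <stdint.h>
#include <stdio.h>

/* Print the mov/movk sequence mov_q would generate for val. */
static void
mov_q_model(uint64_t val)
{
	int shift;

	printf("mov  reg, #0x%04x\n", (unsigned int)(val & 0xffff));
	for (shift = 16; shift < 64; shift += 16) {
		unsigned int field = (val >> shift) & 0xffff;

		/* A movk is only needed for non-zero 16-bit fields. */
		if (field != 0)
			printf("movk reg, #0x%04x, lsl #%d\n", field, shift);
	}
}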
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
index 53856dd90cae..322bad273a08 100644
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -173,16 +173,20 @@ SYSINIT(ssp_warn, SI_SUB_COPYRIGHT, SI_ORDER_ANY, print_ssp_warning, NULL);
 SYSINIT(ssp_warn2, SI_SUB_LAST, SI_ORDER_ANY, print_ssp_warning, NULL);
 #endif
 
-static bool
+static cpu_feat_en
 pan_check(const struct cpu_feat *feat __unused, u_int midr __unused)
 {
 	uint64_t id_aa64mfr1;
 
-	id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
-	return (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE);
+	if (!get_kernel_reg(ID_AA64MMFR1_EL1, &id_aa64mfr1))
+		return (FEAT_ALWAYS_DISABLE);
+	if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) == ID_AA64MMFR1_PAN_NONE)
+		return (FEAT_ALWAYS_DISABLE);
+
+	return (FEAT_DEFAULT_ENABLE);
 }
 
-static void
+static bool
 pan_enable(const struct cpu_feat *feat __unused,
     cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
     u_int errata_count __unused)
@@ -200,15 +204,20 @@ pan_enable(const struct cpu_feat *feat __unused,
 	    ".arch_extension pan	\n"
 	    "msr pan, #1		\n"
 	    ".arch_extension nopan	\n");
+
+	return (true);
 }
 
-static struct cpu_feat feat_pan = {
-	.feat_name = "FEAT_PAN",
-	.feat_check = pan_check,
-	.feat_enable = pan_enable,
-	.feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_pan);
+static void
+pan_disabled(const struct cpu_feat *feat __unused)
+{
+	if (PCPU_GET(cpuid) == 0)
+		update_special_reg(ID_AA64MMFR1_EL1, ID_AA64MMFR1_PAN_MASK, 0);
+}
+
+CPU_FEAT(feat_pan, "Privileged access never",
+    pan_check, NULL, pan_enable, pan_disabled,
+    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
 
 bool
 has_hyp(void)
@@ -857,7 +866,7 @@ initarm(struct arm64_bootparams *abp)
 		cninit();
 
 	set_ttbr0(abp->kern_ttbr0);
-	cpu_tlb_flushID();
+	pmap_s1_invalidate_all_kernel();
 
 	if (!valid)
 		panic("Invalid bus configuration: %s",
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 07955866bd1c..dbf5c820d20b 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -190,6 +190,8 @@ pt_entry_t __read_mostly pmap_gp_attr;
 #define	PMAP_SAN_PTE_BITS	(ATTR_AF | ATTR_S1_XN | pmap_sh_attr | \
 	ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
 
+static bool __read_mostly pmap_multiple_tlbi = false;
+
 struct pmap_large_md_page {
 	struct rwlock      pv_lock;
 	struct md_page     pv_page;
@@ -469,7 +471,7 @@ static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
     vm_offset_t va);
 
 static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
-static bool pmap_activate_int(pmap_t pmap);
+static bool pmap_activate_int(struct thread *td, pmap_t pmap);
 static void pmap_alloc_asid(pmap_t pmap);
 static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
     vm_prot_t prot, int mode, bool skip_unmapped);
@@ -1297,7 +1299,7 @@ pmap_bootstrap_dmap(vm_size_t kernlen)
 		}
 	}
 
-	cpu_tlb_flushID();
+	pmap_s1_invalidate_all_kernel();
 
 	bs_state.dmap_valid = true;
 
@@ -1399,7 +1401,7 @@ pmap_bootstrap(void)
 	/* And the l3 tables for the early devmap */
 	pmap_bootstrap_l3(VM_MAX_KERNEL_ADDRESS - (PMAP_MAPDEV_EARLY_SIZE));
 
-	cpu_tlb_flushID();
+	pmap_s1_invalidate_all_kernel();
 
 #define	alloc_pages(var, np)						\
 	(var) = bs_state.freemempos;					\
@@ -1656,14 +1658,17 @@ pmap_init_pv_table(void)
 	}
 }
 
-static bool
+static cpu_feat_en
 pmap_dbm_check(const struct cpu_feat *feat __unused, u_int midr __unused)
 {
 	uint64_t id_aa64mmfr1;
 
 	id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
-	return (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
-	    ID_AA64MMFR1_HAFDBS_AF_DBS);
+	if (ID_AA64MMFR1_HAFDBS_VAL(id_aa64mmfr1) >=
+	    ID_AA64MMFR1_HAFDBS_AF_DBS)
+		return (FEAT_DEFAULT_ENABLE);
+
+	return (FEAT_ALWAYS_DISABLE);
 }
 
 static bool
@@ -1671,8 +1676,8 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
     u_int **errata_list, u_int *errata_count)
 {
 	/* Disable on Cortex-A55 for erratum 1024718 - all revisions */
-	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK, CPU_IMPL_ARM,
-	    CPU_PART_CORTEX_A55, 0, 0)) {
+	if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+	    CPU_PART(midr) == CPU_PART_CORTEX_A55) {
 		static u_int errata_id = 1024718;
 
 		*errata_list = &errata_id;
@@ -1681,21 +1686,19 @@ pmap_dbm_has_errata(const struct cpu_feat *feat __unused, u_int midr,
 	}
 
 	/* Disable on Cortex-A510 for erratum 2051678 - r0p0 to r0p2 */
-	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_VAR_MASK,
-	    CPU_IMPL_ARM, CPU_PART_CORTEX_A510, 0, 0)) {
-		if (CPU_REV(PCPU_GET(midr)) < 3) {
-			static u_int errata_id = 2051678;
+	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+	    0, 0, 0, 2)) {
+		static u_int errata_id = 2051678;
 
-			*errata_list = &errata_id;
-			*errata_count = 1;
-			return (true);
-		}
+		*errata_list = &errata_id;
+		*errata_count = 1;
+		return (true);
 	}
 
 	return (false);
 }
 
-static void
+static bool
 pmap_dbm_enable(const struct cpu_feat *feat __unused,
     cpu_feat_errata errata_status, u_int *errata_list __unused,
     u_int errata_count)
@@ -1704,7 +1707,7 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
 
 	/* Skip if there is an erratum affecting DBM */
 	if (errata_status != ERRATA_NONE)
-		return;
+		return (false);
 
 	tcr = READ_SPECIALREG(tcr_el1) | TCR_HD;
 	WRITE_SPECIALREG(tcr_el1, tcr);
@@ -1714,16 +1717,58 @@ pmap_dbm_enable(const struct cpu_feat *feat __unused,
 	__asm __volatile("tlbi vmalle1");
 	dsb(nsh);
 	isb();
+
+	return (true);
 }
 
-static struct cpu_feat feat_dbm = {
-	.feat_name = "FEAT_HAFDBS (DBM)",
-	.feat_check = pmap_dbm_check,
-	.feat_has_errata = pmap_dbm_has_errata,
-	.feat_enable = pmap_dbm_enable,
-	.feat_flags = CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU,
-};
-DATA_SET(cpu_feat_set, feat_dbm);
+CPU_FEAT(feat_hafdbs, "Hardware management of the Access flag and dirty state",
+    pmap_dbm_check, pmap_dbm_has_errata, pmap_dbm_enable, NULL,
+    CPU_FEAT_AFTER_DEV | CPU_FEAT_PER_CPU);
+
+static cpu_feat_en
+pmap_multiple_tlbi_check(const struct cpu_feat *feat __unused, u_int midr)
+{
+	/*
+	 * Cortex-A55 erratum 2441007 (Cat B rare)
+	 * Present in all revisions
+	 */
+	if (CPU_IMPL(midr) == CPU_IMPL_ARM &&
+	    CPU_PART(midr) == CPU_PART_CORTEX_A55)
+		return (FEAT_DEFAULT_DISABLE);
+
+	/*
+	 * Cortex-A76 erratum 1286807 (Cat B rare)
+	 * Present in r0p0 - r3p0
+	 * Fixed in r3p1
+	 */
+	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A76,
+	    0, 0, 3, 0))
+		return (FEAT_DEFAULT_DISABLE);
+
+	/*
+	 * Cortex-A510 erratum 2441009 (Cat B rare)
+	 * Present in r0p0 - r1p1
+	 * Fixed in r1p2
+	 */
+	if (midr_check_var_part_range(midr, CPU_IMPL_ARM, CPU_PART_CORTEX_A510,
+	    0, 0, 1, 1))
+		return (FEAT_DEFAULT_DISABLE);
+
+	return (FEAT_ALWAYS_DISABLE);
+}
+
+static bool
+pmap_multiple_tlbi_enable(const struct cpu_feat *feat __unused,
+    cpu_feat_errata errata_status, u_int *errata_list __unused,
+    u_int errata_count __unused)
+{
+	pmap_multiple_tlbi = true;
+
+	return (true);
+}
+
+CPU_FEAT(errata_multi_tlbi, "Multiple TLBI errata",
+    pmap_multiple_tlbi_check, NULL, pmap_multiple_tlbi_enable, NULL,
+    CPU_FEAT_EARLY_BOOT | CPU_FEAT_PER_CPU);
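Since all three workarounds are for Category B (rare) errata, the check returns FEAT_DEFAULT_DISABLE, so the double-TLBI path stays off unless the user opts in via the hw.feat.errata_multi_tlbi tunable. The decision enable_cpu_feat() now makes can be condensed as follows (a model, not code from the patch):

static bool
should_enable(cpu_feat_en check_status, uint32_t feat_flags)
{
	if (check_status == FEAT_ALWAYS_DISABLE)
		return (false);
	if ((feat_flags & CPU_FEAT_USER_DISABLED) != 0)
		return (false);
	if (check_status == FEAT_DEFAULT_DISABLE)
		return ((feat_flags & CPU_FEAT_USER_ENABLED) != 0);
	return (true);		/* FEAT_DEFAULT_ENABLE */
}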
 
 /*
  * Initialize the pmap module.
@@ -1878,9 +1923,17 @@ pmap_s1_invalidate_page(pmap_t pmap, vm_offset_t va, bool final_only)
 	r = TLBI_VA(va);
 	if (pmap == kernel_pmap) {
 		pmap_s1_invalidate_kernel(r, final_only);
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			pmap_s1_invalidate_kernel(r, final_only);
+		}
 	} else {
 		r |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
 		pmap_s1_invalidate_user(r, final_only);
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			pmap_s1_invalidate_user(r, final_only);
+		}
 	}
 	dsb(ish);
 	isb();
@@ -1922,12 +1975,24 @@ pmap_s1_invalidate_strided(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		end = TLBI_VA(eva);
 		for (r = start; r < end; r += TLBI_VA(stride))
 			pmap_s1_invalidate_kernel(r, final_only);
+
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			for (r = start; r < end; r += TLBI_VA(stride))
+				pmap_s1_invalidate_kernel(r, final_only);
+		}
 	} else {
 		start = end = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
 		start |= TLBI_VA(sva);
 		end |= TLBI_VA(eva);
 		for (r = start; r < end; r += TLBI_VA(stride))
 			pmap_s1_invalidate_user(r, final_only);
+
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			for (r = start; r < end; r += TLBI_VA(stride))
+				pmap_s1_invalidate_user(r, final_only);
+		}
 	}
 	dsb(ish);
 	isb();
@@ -1963,6 +2028,19 @@ pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		pmap_s2_invalidate_range(pmap, sva, eva, final_only);
 }
 
+void
+pmap_s1_invalidate_all_kernel(void)
+{
+	dsb(ishst);
+	__asm __volatile("tlbi vmalle1is");
+	dsb(ish);
+	if (pmap_multiple_tlbi) {
+		__asm __volatile("tlbi vmalle1is");
+		dsb(ish);
+	}
+	isb();
+}
+
 /*
  * Invalidates all cached intermediate- and final-level TLB entries for the
  * given virtual address space.
  */
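Each site open-codes the same workaround shape: complete the first invalidation with a DSB, then issue it a second time. A condensed sketch of the pattern (illustrative; the patch does not add such a wrapper):

static inline void
tlbi_page_workaround(uint64_t r, bool final_only)
{
	pmap_s1_invalidate_kernel(r, final_only);
	if (pmap_multiple_tlbi) {
		/*
		 * Wait for the first TLBI to complete, then repeat it,
		 * as the Cat B errata listed above require.
		 */
		dsb(ish);
		pmap_s1_invalidate_kernel(r, final_only);
	}
	dsb(ish);
	isb();
}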
@@ -1977,9 +2055,17 @@ pmap_s1_invalidate_all(pmap_t pmap)
 	dsb(ishst);
 	if (pmap == kernel_pmap) {
 		__asm __volatile("tlbi vmalle1is");
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			__asm __volatile("tlbi vmalle1is");
+		}
 	} else {
 		r = ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie));
 		__asm __volatile("tlbi aside1is, %0" : : "r" (r));
+		if (pmap_multiple_tlbi) {
+			dsb(ish);
+			__asm __volatile("tlbi aside1is, %0" : : "r" (r));
+		}
 	}
 	dsb(ish);
 	isb();
@@ -7967,7 +8053,7 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 			pa += L2_SIZE;
 		}
 		if ((old_l2e & ATTR_DESCR_VALID) != 0)
-			pmap_s1_invalidate_all(kernel_pmap);
+			pmap_s1_invalidate_all_kernel();
 		else {
 			/*
 			 * Because the old entries were invalid and the new
@@ -8058,7 +8144,7 @@ pmap_unmapbios(void *p, vm_size_t size)
 		}
 	}
 	if (preinit_map) {
-		pmap_s1_invalidate_all(kernel_pmap);
+		pmap_s1_invalidate_all_kernel();
 		return;
 	}
 
@@ -9113,7 +9199,7 @@ pmap_init_cnp(void *dummy __unused)
 SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL);
 
 static bool
-pmap_activate_int(pmap_t pmap)
+pmap_activate_int(struct thread *td, pmap_t pmap)
 {
 	struct asid_set *set;
 	int epoch;
@@ -9152,6 +9238,15 @@ pmap_activate_int(pmap_t pmap)
 		pmap_alloc_asid(pmap);
 
 	if (pmap->pm_stage == PM_STAGE1) {
+		uint64_t new_tcr, tcr;
+
+		new_tcr = td->td_proc->p_md.md_tcr;
+		tcr = READ_SPECIALREG(tcr_el1);
+		if ((tcr & MD_TCR_FIELDS) != new_tcr) {
+			tcr &= ~MD_TCR_FIELDS;
+			tcr |= new_tcr;
+			WRITE_SPECIALREG(tcr_el1, tcr);
+		}
 		set_ttbr0(pmap_to_ttbr0(pmap));
 		if (PCPU_GET(bcast_tlbi_workaround) != 0)
 			invalidate_local_icache();
@@ -9165,7 +9260,7 @@ pmap_activate_vm(pmap_t pmap)
 
 	PMAP_ASSERT_STAGE2(pmap);
 
-	(void)pmap_activate_int(pmap);
+	(void)pmap_activate_int(NULL, pmap);
 }
 
 void
@@ -9176,7 +9271,7 @@ pmap_activate(struct thread *td)
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
 	PMAP_ASSERT_STAGE1(pmap);
 	critical_enter();
-	(void)pmap_activate_int(pmap);
+	(void)pmap_activate_int(td, pmap);
 	critical_exit();
 }
 
@@ -9202,7 +9297,7 @@ pmap_switch(struct thread *new)
 	 * to a user process.
 	 */
 
-	if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
+	if (pmap_activate_int(new, vmspace_pmap(new->td_proc->p_vmspace))) {
 		/*
 		 * Stop userspace from training the branch predictor against
 		 * other processes. This will call into a CPU specific
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index 767b7e115479..ab40b72887e9 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -82,7 +82,7 @@ ptrauth_disable(void)
 	return (false);
 }
 
-static bool
+static cpu_feat_en
 ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
 {
 	uint64_t isar;
@@ -97,11 +97,11 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
 	if (!pac_enable) {
 		if (boothowto & RB_VERBOSE)
 			printf("Pointer authentication is disabled\n");
-		goto out;
+		return (FEAT_ALWAYS_DISABLE);
 	}
 
 	if (ptrauth_disable())
-		goto out;
+		return (FEAT_ALWAYS_DISABLE);
 
 	/*
 	 * This assumes if there is pointer authentication on the boot CPU
@@ -116,32 +116,21 @@ ptrauth_check(const struct cpu_feat *feat __unused, u_int midr __unused)
 	if (get_kernel_reg(ID_AA64ISAR1_EL1, &isar)) {
 		if (ID_AA64ISAR1_APA_VAL(isar) > 0 ||
 		    ID_AA64ISAR1_API_VAL(isar) > 0) {
-			return (true);
+			return (FEAT_DEFAULT_ENABLE);
 		}
 	}
 
 	/* The QARMA3 algorithm is reported in ID_AA64ISAR2_EL1. */
 	if (get_kernel_reg(ID_AA64ISAR2_EL1, &isar)) {
 		if (ID_AA64ISAR2_APA3_VAL(isar) > 0) {
-			return (true);
+			return (FEAT_DEFAULT_ENABLE);
 		}
 	}
 
-out:
-	/*
-	 * Pointer authentication may be disabled, mask out the ID fields we
-	 * expose to userspace and the rest of the kernel so they don't try
-	 * to use it.
-	 */
-	update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
-	    ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
-	    ID_AA64ISAR1_GPI_MASK, 0);
-	update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
-
-	return (false);
+	return (FEAT_ALWAYS_DISABLE);
 }
 
-static void
+static bool
 ptrauth_enable(const struct cpu_feat *feat __unused,
     cpu_feat_errata errata_status __unused, u_int *errata_list __unused,
     u_int errata_count __unused)
@@ -149,16 +138,34 @@ ptrauth_enable(const struct cpu_feat *feat __unused,
 	enable_ptrauth = true;
 	elf64_addr_mask.code |= PAC_ADDR_MASK;
 	elf64_addr_mask.data |= PAC_ADDR_MASK;
+#ifdef COMPAT_FREEBSD14
+	elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
+	elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
+#endif
+
+	return (true);
 }
 
+static void
+ptrauth_disabled(const struct cpu_feat *feat __unused)
+{
+	/*
+	 * Pointer authentication may be disabled, mask out the ID fields we
+	 * expose to userspace and the rest of the kernel so they don't try
+	 * to use it.
+	 */
+	if (PCPU_GET(cpuid) == 0) {
+		update_special_reg(ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_MASK |
+		    ID_AA64ISAR1_APA_MASK | ID_AA64ISAR1_GPA_MASK |
+		    ID_AA64ISAR1_GPI_MASK, 0);
+		update_special_reg(ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_MASK, 0);
+	}
+}
 
-static struct cpu_feat feat_pauth = {
-	.feat_name = "FEAT_PAuth",
-	.feat_check = ptrauth_check,
-	.feat_enable = ptrauth_enable,
-	.feat_flags = CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM,
-};
-DATA_SET(cpu_feat_set, feat_pauth);
+CPU_FEAT(feat_pauth, "Pointer Authentication",
+    ptrauth_check, NULL, ptrauth_enable, ptrauth_disabled,
+    CPU_FEAT_EARLY_BOOT | CPU_FEAT_SYSTEM);
 
 /* Copy the keys when forking a new process */
 void
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
index 7b6010a5f51f..a461fded929c 100644
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -37,6 +37,8 @@
 #include <machine/asm.h>
 #include <machine/armreg.h>
 
+#include <machine/proc.h>
+
 .macro clear_step_flag pcbflags, tmp
 	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
 	mrs	\tmp, mdscr_el1
@@ -239,6 +241,16 @@ ENTRY(fork_trampoline)
 	msr	daifset, #(DAIF_D | DAIF_INTR)
 
 	ldr	x0, [x18, #PC_CURTHREAD]
+
+	/* Set the per-process tcr_el1 fields */
+	ldr	x10, [x0, #TD_PROC]
+	ldr	x10, [x10, #P_MD_TCR]
+	mrs	x11, tcr_el1
+	and	x11, x11, #(~MD_TCR_FIELDS)
+	orr	x11, x11, x10
+	msr	tcr_el1, x11
+	/* No isb as the eret below is the context-synchronising event */
+
 	bl	ptrauth_enter_el0
 
 	/* Restore sp, lr, elr, and spsr */
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
index bed58095201a..75c9b5f87892 100644
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -246,6 +246,7 @@ external_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
 
 	print_registers(frame);
 	print_gp_register("far", far);
+	printf(" esr: 0x%.16lx\n", esr);
 	panic("Unhandled external data abort");
 }
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
index 38a126ff602f..0134feb65b6a 100644
--- a/sys/arm64/arm64/vm_machdep.c
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -120,6 +120,9 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
 	td2->td_md.md_spinlock_count = 1;
 	td2->td_md.md_saved_daif = PSR_DAIF_DEFAULT;
 
+	/* Copy the TCR_EL1 value */
+	td2->td_proc->p_md.md_tcr = td1->td_proc->p_md.md_tcr;
+
 #if defined(PERTHREAD_SSP)
 	/* Set the new canary */
 	arc4random_buf(&td2->td_md.md_canary, sizeof(td2->td_md.md_canary));
diff --git a/sys/arm64/conf/std.arm64 b/sys/arm64/conf/std.arm64
index c83e98c17a33..a0568466cfaf 100644
--- a/sys/arm64/conf/std.arm64
+++ b/sys/arm64/conf/std.arm64
@@ -7,6 +7,7 @@ makeoptions	WITH_CTF=1	# Run ctfconvert(1) for DTrace support
 options 	SCHED_ULE	# ULE scheduler
 options 	NUMA		# Non-Uniform Memory Architecture support
 options 	PREEMPTION	# Enable kernel thread preemption
+options 	EXTERR_STRINGS
 options 	VIMAGE		# Subsystem virtualization, e.g. VNET
 options 	INET		# InterNETworking
 options 	INET6		# IPv6 communications protocols
diff --git a/sys/arm64/conf/std.dev b/sys/arm64/conf/std.dev
index c5c364ffda04..719f272426dd 100644
--- a/sys/arm64/conf/std.dev
+++ b/sys/arm64/conf/std.dev
@@ -115,6 +115,7 @@ device		mmcsd		# mmc/sd flash cards
 options 	HID_DEBUG	# enable debug msgs
 device		hid		# Generic HID support
 device		hidbus		# Generic HID Bus
+options 	U2F_MAKE_UHID_ALIAS	# install /dev/uhid alias for /dev/u2f/
 
 # Firmware
 device		mmio_sram	# Generic on-chip SRAM
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 500f35c48787..c2065fdb3f8c 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -2612,10 +2612,12 @@
     (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
 #define	SCTLR_MMU_ON	\
     (SCTLR_MMU_OFF |	\
+    SCTLR_EPAN |	\
     SCTLR_BT1 |		\
     SCTLR_BT0 |		\
     SCTLR_UCI |		\
     SCTLR_SPAN |	\
+    SCTLR_IESB |	\
     SCTLR_nTWE |	\
     SCTLR_nTWI |	\
     SCTLR_UCT |		\
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 935e3754bf25..124da8c215ed 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -125,7 +125,11 @@
 #define	CPU_PART_NEOVERSE_V3	0xD84
 #define	CPU_PART_CORTEX_X925	0xD85
 #define	CPU_PART_CORTEX_A725	0xD87
+#define	CPU_PART_C1_NANO	0xD8A
+#define	CPU_PART_C1_PRO		0xD8B
+#define	CPU_PART_C1_ULTRA	0xD8C
 #define	CPU_PART_NEOVERSE_N3	0xD8E
+#define	CPU_PART_C1_PREMIUM	0xD90
 
 /* Cavium Part numbers */
 #define	CPU_PART_THUNDERX	0x0A1
@@ -193,8 +197,30 @@
     (((mask) & PCPU_GET(midr)) ==					\
     ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
 
-#define	CPU_MATCH_RAW(mask, devid)					\
-    (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+#if !defined(__ASSEMBLER__)
+static inline bool
+midr_check_var_part_range(u_int midr, u_int impl, u_int part, u_int var_low,
+    u_int part_low, u_int var_high, u_int part_high)
+{
+	/* Check for the correct part */
+	if (CPU_IMPL(midr) != impl || CPU_PART(midr) != part)
+		return (false);
+
+	/* Check if the variant is between var_low and var_high inclusive */
+	if (CPU_VAR(midr) < var_low || CPU_VAR(midr) > var_high)
+		return (false);
+
+	/* If the variant is the low value, check if the revision is high enough */
+	if (CPU_VAR(midr) == var_low && CPU_REV(midr) < part_low)
+		return (false);
+
+	/* If the variant is the high value, check if the revision is low enough */
+	if (CPU_VAR(midr) == var_high && CPU_REV(midr) > part_high)
+		return (false);
+
+	return (true);
+}
+#endif
 
 /*
  * Chip-specific errata. This defines are intended to be
@@ -226,6 +252,9 @@ extern uint64_t __cpu_affinity[];
 
 struct arm64_addr_mask;
 extern struct arm64_addr_mask elf64_addr_mask;
+#ifdef COMPAT_FREEBSD14
+extern struct arm64_addr_mask elf64_addr_mask_14;
+#endif
 
 typedef void (*cpu_reset_hook_t)(void);
 extern cpu_reset_hook_t cpu_reset_hook;
diff --git a/sys/arm64/include/cpu_feat.h b/sys/arm64/include/cpu_feat.h
index 9fe6a9dd95d9..6a311d4000bb 100644
--- a/sys/arm64/include/cpu_feat.h
+++ b/sys/arm64/include/cpu_feat.h
@@ -29,6 +29,7 @@
 #define	_MACHINE_CPU_FEAT_H_
 
 #include <sys/linker_set.h>
+#include <sys/sysctl.h>
 
 typedef enum {
 	ERRATA_UNKNOWN,		/* Unknown erratum */
@@ -39,6 +40,31 @@ typedef enum {
 				/* kernel component. */
 } cpu_feat_errata;
 
+typedef enum {
+	/*
+	 * Don't implement the feature or erratum workaround,
+	 * e.g. the feature is not implemented or the erratum is
+	 * for another CPU.
+	 */
+	FEAT_ALWAYS_DISABLE,
+
+	/*
+	 * Disable by default, but allow the user to enable,
+	 * e.g. for a rare erratum with a workaround, Arm
+	 * Category B (rare) or similar.
+	 */
+	FEAT_DEFAULT_DISABLE,
+
+	/*
+	 * Enabled by default, but allow the user to disable,
+	 * e.g. for a common erratum with a workaround, Arm
+	 * Category A or B or similar.
+	 */
+	FEAT_DEFAULT_ENABLE,
+
+	/* We could add FEAT_ALWAYS_ENABLE if a need was found. */
+} cpu_feat_en;
+
 #define	CPU_FEAT_STAGE_MASK	0x00000001
 #define	CPU_FEAT_EARLY_BOOT	0x00000000
 #define	CPU_FEAT_AFTER_DEV	0x00000001
@@ -47,23 +73,45 @@ typedef enum {
 #define	CPU_FEAT_PER_CPU	0x00000000
 #define	CPU_FEAT_SYSTEM		0x00000010
 
+#define	CPU_FEAT_USER_ENABLED	0x40000000
+#define	CPU_FEAT_USER_DISABLED	0x80000000
+
 struct cpu_feat;
 
-typedef bool (cpu_feat_check)(const struct cpu_feat *, u_int);
+typedef cpu_feat_en (cpu_feat_check)(const struct cpu_feat *, u_int);
 typedef bool (cpu_feat_has_errata)(const struct cpu_feat *, u_int,
     u_int **, u_int *);
-typedef void (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
+typedef bool (cpu_feat_enable)(const struct cpu_feat *, cpu_feat_errata,
     u_int *, u_int);
+typedef void (cpu_feat_disabled)(const struct cpu_feat *);
 
 struct cpu_feat {
 	const char		*feat_name;
 	cpu_feat_check		*feat_check;
 	cpu_feat_has_errata	*feat_has_errata;
 	cpu_feat_enable		*feat_enable;
+	cpu_feat_disabled	*feat_disabled;
 	uint32_t		feat_flags;
+	bool			feat_enabled;
 };
 SET_DECLARE(cpu_feat_set, struct cpu_feat);
 
+SYSCTL_DECL(_hw_feat);
+
+#define	CPU_FEAT(name, descr, check, has_errata, enable, disabled, flags) \
+static struct cpu_feat name = {						\
+	.feat_name		= #name,				\
+	.feat_check		= check,				\
+	.feat_has_errata	= has_errata,				\
+	.feat_enable		= enable,				\
+	.feat_disabled		= disabled,				\
+	.feat_flags		= flags,				\
+	.feat_enabled		= false,				\
+};									\
+DATA_SET(cpu_feat_set, name);						\
+SYSCTL_BOOL(_hw_feat, OID_AUTO, name, CTLFLAG_RD, &name.feat_enabled,	\
+    0, descr)
+
 /*
  * Allow drivers to mark an erratum as worked around, e.g. the Errata
  * Management ABI may know the workaround isn't needed on a given system.
 */
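With SYSCTL_BOOL wired into the macro, each feature's final state is visible from userspace. A small consumer, assuming the hw.feat.feat_pauth OID created by the CPU_FEAT() use in ptrauth.c above (SYSCTL_BOOL is backed by a one-byte bool):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	bool enabled;
	size_t len = sizeof(enabled);

	if (sysctlbyname("hw.feat.feat_pauth", &enabled, &len, NULL, 0) == 0)
		printf("Pointer Authentication: %s\n",
		    enabled ? "enabled" : "disabled");
	return (0);
}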
diff --git a/sys/arm64/include/elf.h b/sys/arm64/include/elf.h
index d6328c143585..81ee7392f866 100644
--- a/sys/arm64/include/elf.h
+++ b/sys/arm64/include/elf.h
@@ -93,6 +93,9 @@ __ElfType(Auxinfo);
 #define	ET_DYN_LOAD_ADDR	0x100000
 #endif
 
+/* First __FreeBSD_version that supports Top Byte Ignore (TBI) */
+#define	TBI_VERSION	1500058
+
 /* HWCAP */
 #define	HWCAP_FP		(1 << 0)
 #define	HWCAP_ASIMD		(1 << 1)
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index 0f23f200f0f6..406b6e2c5e0a 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -69,6 +69,7 @@ struct md_page {
 	TAILQ_HEAD(,pv_entry)	pv_list;
 	int			pv_gen;
 	vm_memattr_t		pv_memattr;
+	uint8_t			pv_reserve[3];
 };
 
 enum pmap_stage {
@@ -174,6 +175,8 @@ int	pmap_fault(pmap_t, uint64_t, uint64_t);
 
 struct pcb *pmap_switch(struct thread *);
 
+void	pmap_s1_invalidate_all_kernel(void);
+
 extern void (*pmap_clean_stage2_tlbi)(void);
 extern void (*pmap_stage2_invalidate_range)(uint64_t, vm_offset_t,
     vm_offset_t, bool);
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index dc2fa2df654d..b40990e89385 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -35,6 +35,7 @@
 #ifndef	_MACHINE_PROC_H_
 #define	_MACHINE_PROC_H_
 
+#ifndef LOCORE
 struct ptrauth_key {
 	uint64_t pa_key_lo;
 	uint64_t pa_key_hi;
@@ -73,8 +74,13 @@ struct mdthread {
 };
 
 struct mdproc {
-	long	md_dummy;
+	uint64_t	md_tcr;	/* TCR_EL1 fields to update */
+	uint64_t	md_reserved[2];
 };
+#endif /* !LOCORE */
+
+/* Fields that can be set in md_tcr */
+#define	MD_TCR_FIELDS	TCR_TBI0
 
 #define	KINFO_PROC_SIZE	1088
 #define	KINFO_PROC32_SIZE 816
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 73b5b4a09591..e839b5dd92c9 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -42,6 +42,7 @@ enum vm_suspend_how {
 	VM_SUSPEND_RESET,
 	VM_SUSPEND_POWEROFF,
 	VM_SUSPEND_HALT,
+	VM_SUSPEND_DESTROY,
 	VM_SUSPEND_LAST
 };
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index c30ca1b2bff4..781602306436 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -209,7 +209,8 @@
 #define	KMSAN_ORIG_MAX_ADDRESS	(0xffff028000000000UL)
 
 /* The address bits that hold a pointer authentication code */
-#define	PAC_ADDR_MASK	(0xff7f000000000000UL)
+#define	PAC_ADDR_MASK	(0x007f000000000000UL)
+#define	PAC_ADDR_MASK_14	(0xff7f000000000000UL)
 
 /* The top-byte ignore address bits */
 #define	TBI_ADDR_MASK	0xff00000000000000UL
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 847bc7394dd0..61614f532634 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -371,12 +371,13 @@ rk_gpio_attach(device_t dev)
 	sc->swporta_ddr = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DDR);
 	RK_GPIO_UNLOCK(sc);
 
-	sc->sc_busdev = gpiobus_attach_bus(dev);
+	sc->sc_busdev = gpiobus_add_bus(dev);
 	if (sc->sc_busdev == NULL) {
 		rk_gpio_detach(dev);
 		return (ENXIO);
 	}
 
+	bus_attach_children(dev);
 	return (0);
 }
diff --git a/sys/arm64/rockchip/rk_grf_gpio.c b/sys/arm64/rockchip/rk_grf_gpio.c
index 6818bd85bb95..6ac419889614 100644
--- a/sys/arm64/rockchip/rk_grf_gpio.c
+++ b/sys/arm64/rockchip/rk_grf_gpio.c
@@ -181,11 +181,12 @@ rk_grf_gpio_attach(device_t dev)
 		return (ENXIO);
 	}
 
-	sc->sc_busdev = gpiobus_attach_bus(dev);
+	sc->sc_busdev = gpiobus_add_bus(dev);
 	if (sc->sc_busdev == NULL) {
 		return (ENXIO);
 	}
 
+	bus_attach_children(dev);
 	return (0);
 }
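Tying together the vmparam.h masks above: with TBI the tag lives in bits 63:56 (TBI_ADDR_MASK) and the PAC now only occupies bits 54:48 (the new PAC_ADDR_MASK), which is why the old combined value moved to PAC_ADDR_MASK_14 for pre-TBI binaries. An illustrative strip function built from those masks (not from the commit):

#include <stdint.h>

#define	TBI_ADDR_MASK	0xff00000000000000UL
#define	PAC_ADDR_MASK	0x007f000000000000UL

/* Clear the TBI tag and PAC bits, recovering the canonical user address. */
static inline uintptr_t
addr_strip_metadata(uintptr_t va)
{
	return (va & ~(TBI_ADDR_MASK | PAC_ADDR_MASK));
}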
diff --git a/sys/arm64/rockchip/rk_tsadc.c b/sys/arm64/rockchip/rk_tsadc.c
index e6cbad36f697..d83b09480a0c 100644
--- a/sys/arm64/rockchip/rk_tsadc.c
+++ b/sys/arm64/rockchip/rk_tsadc.c
@@ -484,7 +484,7 @@ tsadc_init_tsensor(struct tsadc_softc *sc, struct tsensor *sensor)
 	WR4(sc, TSADC_INT_EN, val);
 
 	/* Shutdown temperature */
-	val = tsadc_raw_to_temp(sc, sc->shutdown_temp);
+	val = tsadc_temp_to_raw(sc, sc->shutdown_temp);
 	WR4(sc, TSADC_COMP_SHUT(sensor->channel), val);
 	val = RD4(sc, TSADC_AUTO_CON);
 	val |= TSADC_AUTO_SRC_EN(sensor->channel);
diff --git a/sys/arm64/vmm/vmm.c b/sys/arm64/vmm/vmm.c
index 3082d2941221..1dcefa1489e9 100644
--- a/sys/arm64/vmm/vmm.c
+++ b/sys/arm64/vmm/vmm.c
@@ -1342,8 +1342,14 @@ vm_handle_smccc_call(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
 static int
 vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
 {
+	struct vm *vm;
+
+	vm = vcpu->vm;
 	vcpu_lock(vcpu);
 	while (1) {
+		if (vm->suspend)
+			break;
+
 		if (vgic_has_pending_irq(vcpu->cookie))
 			break;