Diffstat (limited to 'sys/arm64')
-rw-r--r--  sys/arm64/apple/apple_pinctrl.c     3
-rw-r--r--  sys/arm64/arm64/copyinout.S        18
-rw-r--r--  sys/arm64/arm64/efirt_machdep.c    19
-rw-r--r--  sys/arm64/arm64/elf_machdep.c      17
-rw-r--r--  sys/arm64/arm64/exec_machdep.c     31
-rw-r--r--  sys/arm64/arm64/genassym.c          1
-rw-r--r--  sys/arm64/arm64/locore.S           23
-rw-r--r--  sys/arm64/arm64/pmap.c             51
-rw-r--r--  sys/arm64/arm64/ptrauth.c           4
-rw-r--r--  sys/arm64/arm64/support.S           9
-rw-r--r--  sys/arm64/arm64/swtch.S            12
-rw-r--r--  sys/arm64/arm64/vm_machdep.c        3
-rw-r--r--  sys/arm64/conf/std.dev              1
-rw-r--r--  sys/arm64/include/armreg.h         20
-rw-r--r--  sys/arm64/include/cpu.h             3
-rw-r--r--  sys/arm64/include/elf.h             3
-rw-r--r--  sys/arm64/include/proc.h            7
-rw-r--r--  sys/arm64/include/vmm.h             1
-rw-r--r--  sys/arm64/include/vmparam.h         8
-rw-r--r--  sys/arm64/rockchip/rk_gpio.c        3
-rw-r--r--  sys/arm64/rockchip/rk_grf_gpio.c    3
-rw-r--r--  sys/arm64/vmm/vmm_arm64.c           2
22 files changed, 191 insertions, 51 deletions
diff --git a/sys/arm64/apple/apple_pinctrl.c b/sys/arm64/apple/apple_pinctrl.c
index ebaaccea1d99..c28b1c62d78c 100644
--- a/sys/arm64/apple/apple_pinctrl.c
+++ b/sys/arm64/apple/apple_pinctrl.c
@@ -171,12 +171,13 @@ apple_pinctrl_attach(device_t dev)
OF_xref_from_node(ofw_bus_get_node(dev)));
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "failed to attach gpiobus\n");
goto error;
}
+ bus_attach_children(dev);
return (0);
error:
mtx_destroy(&sc->sc_mtx);
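
Note: this hunk, and the rk_gpio.c and rk_grf_gpio.c hunks further down, all move from gpiobus_attach_bus(), which created and attached the bus in one call, to the two-step gpiobus_add_bus() plus bus_attach_children(). A minimal sketch of the resulting attach-path shape (foo_* names are placeholders; driver-specific setup is elided):

static int
foo_gpio_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	/* ... controller-specific setup ... */

	/* Create the gpiobus child, but do not attach it yet. */
	sc->sc_busdev = gpiobus_add_bus(dev);
	if (sc->sc_busdev == NULL) {
		device_printf(dev, "failed to attach gpiobus\n");
		goto error;
	}

	/* Attach all children, including the new gpiobus, in one pass. */
	bus_attach_children(dev);
	return (0);
error:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}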
diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
index 26dd0b4cf14f..e41c4b5f6734 100644
--- a/sys/arm64/arm64/copyinout.S
+++ b/sys/arm64/arm64/copyinout.S
@@ -37,7 +37,14 @@
#include "assym.inc"
.macro check_user_access user_arg, size_arg, bad_access_func
- adds x6, x\user_arg, x\size_arg
+ /*
+ * TBI is enabled from 15.0. Clear the top byte of the userspace
+ * address before checking whether it's within the given limit.
+ * The later load/store instructions will fault if TBI is disabled
+ * for the current process.
+ */
+ and x6, x\user_arg, #(~TBI_ADDR_MASK)
+ adds x6, x6, x\size_arg
b.cs \bad_access_func
ldr x7, =VM_MAXUSER_ADDRESS
cmp x6, x7
@@ -100,13 +107,20 @@ ENTRY(copyinstr)
adr x6, copyio_fault /* Get the handler address */
SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+ /*
+ * As in check_user_access mask off the TBI bits for the cmp
+ * instruction. The load will fault if TBI is disabled, but we still
+ * need to check that the address didn't wrap.
+ */
+ and x6, x0, #(~TBI_ADDR_MASK)
ldr x7, =VM_MAXUSER_ADDRESS
-1: cmp x0, x7
+1: cmp x6, x7
b.cs copyio_fault
ldtrb w4, [x0] /* Load from uaddr */
add x0, x0, #1 /* Next char */
strb w4, [x1], #1 /* Store in kaddr */
add x5, x5, #1 /* count++ */
+ add x6, x6, #1 /* Increment masked address */
cbz w4, 2f /* Break when NUL-terminated */
sub x2, x2, #1 /* len-- */
cbnz x2, 1b
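
In C terms, the masked range check that check_user_access and the copyinstr loop perform looks roughly like the sketch below. user_range_ok is a hypothetical name for illustration; in the real code the check is the assembly above, and the per-process TBI fault comes from the ldtr/sttr load/store instructions themselves:

#include <stdbool.h>
#include <stdint.h>

#define	TBI_ADDR_MASK	0xff00000000000000UL	/* bits 63:56, ignored under TBI */

/*
 * Hypothetical C model of check_user_access: clear the top byte, then
 * check that [uaddr, uaddr + size) neither wraps nor leaves user space.
 */
static bool
user_range_ok(uint64_t uaddr, uint64_t size, uint64_t vm_maxuser)
{
	uint64_t start, end;

	start = uaddr & ~TBI_ADDR_MASK;	/* drop the ignored tag byte */
	end = start + size;
	if (end < start)
		return (false);		/* "adds ...; b.cs": the add wrapped */
	return (end < vm_maxuser);	/* "cmp/b.cs" against VM_MAXUSER_ADDRESS */
}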
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0f46e44f5d6a..0301eb91c9ef 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -241,6 +241,7 @@ fail:
int
efi_arch_enter(void)
{
+ uint64_t tcr;
CRITICAL_ASSERT(curthread);
curthread->td_md.md_efirt_dis_pf = vm_fault_disable_pagefaults();
@@ -249,7 +250,17 @@ efi_arch_enter(void)
* Temporarily switch to EFI's page table. However, we leave curpmap
* unchanged in order to prevent its ASID from being reclaimed before
* we switch back to its page table in efi_arch_leave().
+ *
+ * UEFI doesn't care about TBI, so enable it. It's more likely
+ * userspace will have TBI on as it's only disabled for backwards
+ * compatibility.
*/
+ tcr = READ_SPECIALREG(tcr_el1);
+ if ((tcr & MD_TCR_FIELDS) != TCR_TBI0) {
+ tcr &= ~MD_TCR_FIELDS;
+ tcr |= TCR_TBI0;
+ WRITE_SPECIALREG(tcr_el1, tcr);
+ }
set_ttbr0(efi_ttbr0);
if (PCPU_GET(bcast_tlbi_workaround) != 0)
invalidate_local_icache();
@@ -260,6 +271,7 @@ efi_arch_enter(void)
void
efi_arch_leave(void)
{
+ uint64_t proc_tcr, tcr;
/*
* Restore the pcpu pointer. Some UEFI implementations trash it and
@@ -271,6 +283,13 @@ efi_arch_leave(void)
__asm __volatile(
"mrs x18, tpidr_el1 \n"
);
+ proc_tcr = curthread->td_proc->p_md.md_tcr;
+ tcr = READ_SPECIALREG(tcr_el1);
+ if ((tcr & MD_TCR_FIELDS) != proc_tcr) {
+ tcr &= ~MD_TCR_FIELDS;
+ tcr |= proc_tcr;
+ WRITE_SPECIALREG(tcr_el1, tcr);
+ }
set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
if (PCPU_GET(bcast_tlbi_workaround) != 0)
invalidate_local_icache();
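
The same read-modify-write of the MD_TCR_FIELDS bits now appears in efi_arch_enter(), efi_arch_leave(), exec_setregs(), and pmap_activate_int(); the exec_machdep.c hunk below even carries a TODO about factoring it out. A possible shared helper, sketched here only to name the idiom (update_md_tcr_fields is not in the commit):

/*
 * Hypothetical helper: make the MD_TCR_FIELDS bits of TCR_EL1 match
 * "want", leaving every other field untouched.  The write is skipped
 * when nothing would change, since system register writes are not free.
 */
static inline void
update_md_tcr_fields(uint64_t want)
{
	uint64_t tcr;

	tcr = READ_SPECIALREG(tcr_el1);
	if ((tcr & MD_TCR_FIELDS) != want) {
		tcr &= ~MD_TCR_FIELDS;
		tcr |= want;
		WRITE_SPECIALREG(tcr_el1, tcr);
	}
}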
diff --git a/sys/arm64/arm64/elf_machdep.c b/sys/arm64/arm64/elf_machdep.c
index 970dba0ca7d9..13af5c5065d6 100644
--- a/sys/arm64/arm64/elf_machdep.c
+++ b/sys/arm64/arm64/elf_machdep.c
@@ -65,7 +65,13 @@ u_long __read_frequently linux_elf_hwcap2;
u_long __read_frequently linux_elf_hwcap3;
u_long __read_frequently linux_elf_hwcap4;
-struct arm64_addr_mask elf64_addr_mask;
+struct arm64_addr_mask elf64_addr_mask = {
+ .code = TBI_ADDR_MASK,
+ .data = TBI_ADDR_MASK,
+};
+#ifdef COMPAT_FREEBSD14
+struct arm64_addr_mask elf64_addr_mask_14;
+#endif
static void arm64_exec_protect(struct image_params *, int);
@@ -136,7 +142,14 @@ get_arm64_addr_mask(struct regset *rs, struct thread *td, void *buf,
if (buf != NULL) {
KASSERT(*sizep == sizeof(elf64_addr_mask),
("%s: invalid size", __func__));
- memcpy(buf, &elf64_addr_mask, sizeof(elf64_addr_mask));
+#ifdef COMPAT_FREEBSD14
+		/* If running an old binary, use the old address mask */
+ if (td->td_proc->p_osrel < TBI_VERSION)
+ memcpy(buf, &elf64_addr_mask_14,
+ sizeof(elf64_addr_mask_14));
+ else
+#endif
+ memcpy(buf, &elf64_addr_mask, sizeof(elf64_addr_mask));
}
*sizep = sizeof(elf64_addr_mask);
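
The regset now hands debuggers a per-process view: binaries built before TBI_VERSION see the FreeBSD 14 address mask. Stated as a predicate (proc_uses_tbi is a made-up name; the kernel open-codes the comparison as above):

/*
 * Hypothetical predicate (not in the commit): does this process run
 * with TBI enabled?  p_osrel is the __FreeBSD_version the binary was
 * built against; TBI_VERSION (1500058) comes from machine/elf.h.
 */
static inline bool
proc_uses_tbi(const struct proc *p)
{
	return (p->p_osrel >= TBI_VERSION);
}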
diff --git a/sys/arm64/arm64/exec_machdep.c b/sys/arm64/arm64/exec_machdep.c
index 751329affd91..7c50dc93fdb4 100644
--- a/sys/arm64/arm64/exec_machdep.c
+++ b/sys/arm64/arm64/exec_machdep.c
@@ -51,6 +51,7 @@
#include <vm/vm_map.h>
#include <machine/armreg.h>
+#include <machine/elf.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
@@ -411,6 +412,7 @@ exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
struct trapframe *tf = td->td_frame;
struct pcb *pcb = td->td_pcb;
+ uint64_t new_tcr, tcr;
memset(tf, 0, sizeof(struct trapframe));
@@ -433,6 +435,35 @@ exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
*/
bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
+	/* If the process is new enough, enable TBI */
+ if (td->td_proc->p_osrel >= TBI_VERSION)
+ new_tcr = TCR_TBI0;
+ else
+ new_tcr = 0;
+ td->td_proc->p_md.md_tcr = new_tcr;
+
+ /* TODO: should create a pmap function for this... */
+ tcr = READ_SPECIALREG(tcr_el1);
+ if ((tcr & MD_TCR_FIELDS) != new_tcr) {
+ uint64_t asid;
+
+ tcr &= ~MD_TCR_FIELDS;
+ tcr |= new_tcr;
+ WRITE_SPECIALREG(tcr_el1, tcr);
+ isb();
+
+ /*
+ * TCR_EL1.TBI0 is permitted to be cached in the TLB, so
+ * we need to perform a TLB invalidation.
+ */
+ asid = READ_SPECIALREG(ttbr0_el1) & TTBR_ASID_MASK;
+ __asm __volatile(
+ "tlbi aside1is, %0 \n"
+ "dsb ish \n"
+ "isb \n"
+ : : "r" (asid));
+ }
+
/* Generate new pointer authentication keys */
ptrauth_exec(td);
}
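
Because TCR_EL1.TBI0 is permitted to be cached in TLB entries, flipping it for an existing address space requires invalidating that ASID's entries. The inline assembly in the hunk above is equivalent to this standalone helper (hypothetical name; TTBR_ASID_MASK is the existing armreg.h constant):

/*
 * Hypothetical helper: broadcast-invalidate all stage 1 TLB entries
 * for the current ASID after changing a TLB-cacheable TCR_EL1 field.
 */
static inline void
tlbi_current_asid(void)
{
	uint64_t asid;

	asid = READ_SPECIALREG(ttbr0_el1) & TTBR_ASID_MASK;
	__asm __volatile(
	    "tlbi aside1is, %0	\n"	/* invalidate by ASID, inner shareable */
	    "dsb  ish		\n"	/* wait for the invalidation to complete */
	    "isb		\n"
	    : : "r" (asid));
}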
diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c
index e3977798b046..22696796e69d 100644
--- a/sys/arm64/arm64/genassym.c
+++ b/sys/arm64/arm64/genassym.c
@@ -64,6 +64,7 @@ ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
ASSYM(P_PID, offsetof(struct proc, p_pid));
+ASSYM(P_MD_TCR, offsetof(struct proc, p_md.md_tcr));
ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
index f200195906ac..4a10a2b4f2d3 100644
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -319,14 +319,12 @@ LEND(mpentry_common)
* - Configure EL2 to support running the kernel at EL1 and exit to that
*/
LENTRY(enter_kernel_el)
-#define INIT_SCTLR_EL1 (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | \
- SCTLR_TSCXT | SCTLR_EOS)
mrs x23, CurrentEL
and x23, x23, #(CURRENTEL_EL_MASK)
cmp x23, #(CURRENTEL_EL_EL2)
b.eq 1f
- ldr x2, =INIT_SCTLR_EL1
+ ldr x2, =SCTLR_MMU_OFF
msr sctlr_el1, x2
/* SCTLR_EOS is set so eret is a context synchronizing event so we
* need an isb here to ensure it's observed by later instructions,
@@ -370,7 +368,7 @@ LENTRY(enter_kernel_el)
msr vmpidr_el2, x2
/* Set the initial sctlr_el1 */
- ldr x2, =INIT_SCTLR_EL1
+ ldr x2, =SCTLR_MMU_OFF
msr sctlr_el1, x2
/* Check for VHE */
@@ -442,7 +440,6 @@ LENTRY(enter_kernel_el)
isb
eret
-#undef INIT_SCTLR_EL1
LEND(enter_kernel_el)
/*
@@ -1037,11 +1034,7 @@ LENTRY(start_mmu)
/*
* Setup SCTLR.
*/
- ldr x2, sctlr_set
- ldr x3, sctlr_clear
- mrs x1, sctlr_el1
- bic x1, x1, x3 /* Clear the required bits */
- orr x1, x1, x2 /* Set the required bits */
+ ldr x1, =SCTLR_MMU_ON
msr sctlr_el1, x1
isb
@@ -1066,16 +1059,6 @@ tcr:
.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \
TCR_SH1_IS | TCR_ORGN1_WBWA | TCR_IRGN1_WBWA | \
TCR_SH0_IS | TCR_ORGN0_WBWA | TCR_IRGN0_WBWA)
-sctlr_set:
- /* Bits to set */
- .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
- SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
- SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \
- SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0)
-sctlr_clear:
- /* Bits to clear */
- .quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
- SCTLR_ITD | SCTLR_A)
LEND(start_mmu)
ENTRY(abort)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 2152f7fcc1c6..ec89c4573799 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -469,7 +469,7 @@ static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
vm_offset_t va);
static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
-static bool pmap_activate_int(pmap_t pmap);
+static bool pmap_activate_int(struct thread *td, pmap_t pmap);
static void pmap_alloc_asid(pmap_t pmap);
static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
vm_prot_t prot, int mode, bool skip_unmapped);
@@ -2915,13 +2915,13 @@ retry:
l1 = pmap_l1(pmap, va);
if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
l2 = pmap_l1_to_l2(l1, va);
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
/* Add a reference to the L2 page. */
l2pg = PTE_TO_VM_PAGE(pmap_load(l1));
l2pg->ref_count++;
} else
l2pg = NULL;
- } else if (!ADDR_IS_KERNEL(va)) {
+ } else if (ADDR_IS_USER(va)) {
/* Allocate a L2 page. */
l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
@@ -4082,7 +4082,7 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
("pmap_remove_l3_range: range crosses an L3 page table boundary"));
- l3pg = !ADDR_IS_KERNEL(sva) ? PTE_TO_VM_PAGE(l2e) : NULL;
+ l3pg = ADDR_IS_USER(sva) ? PTE_TO_VM_PAGE(l2e) : NULL;
va = eva;
for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
old_l3 = pmap_load(l3);
@@ -5310,7 +5310,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= ATTR_SW_WIRED;
if (pmap->pm_stage == PM_STAGE1) {
- if (!ADDR_IS_KERNEL(va))
+ if (ADDR_IS_USER(va))
new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
else
new_l3 |= ATTR_S1_UXN;
@@ -5401,7 +5401,7 @@ retry:
pde = pmap_pde(pmap, va, &lvl);
if (pde != NULL && lvl == 2) {
l3 = pmap_l2_to_l3(pde, va);
- if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
+ if (ADDR_IS_USER(va) && mpte == NULL) {
mpte = PTE_TO_VM_PAGE(pmap_load(pde));
mpte->ref_count++;
}
@@ -5411,7 +5411,7 @@ retry:
if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
(l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
l3 = &l3[pmap_l3_index(va)];
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
mpte = PTE_TO_VM_PAGE(pmap_load(l2));
mpte->ref_count++;
}
@@ -5419,7 +5419,7 @@ retry:
}
/* We need to allocate an L3 table. */
}
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
/*
@@ -5657,7 +5657,7 @@ pmap_enter_l2_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
new_l2 |= ATTR_S1_XN;
- if (!ADDR_IS_KERNEL(va))
+ if (ADDR_IS_USER(va))
new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
else
new_l2 |= ATTR_S1_UXN;
@@ -5745,7 +5745,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
"pmap_enter_l2: no space for va %#lx"
" in pmap %p", va, pmap);
return (KERN_NO_SPACE);
- } else if (!ADDR_IS_KERNEL(va) ||
+ } else if (ADDR_IS_USER(va) ||
!pmap_every_pte_zero(PTE_TO_PHYS(old_l2))) {
if (l2pg != NULL)
l2pg->ref_count--;
@@ -5796,7 +5796,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
}
KASSERT(pmap_load(l2) == 0,
("pmap_enter_l2: non-zero L2 entry %p", l2));
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
vm_page_free_pages_toq(&free, true);
} else {
KASSERT(SLIST_EMPTY(&free),
@@ -5916,7 +5916,7 @@ pmap_enter_l3c_rx(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *ml3p,
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
l3e |= ATTR_S1_XN;
- if (!ADDR_IS_KERNEL(va))
+ if (ADDR_IS_USER(va))
l3e |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
else
l3e |= ATTR_S1_UXN;
@@ -5948,7 +5948,7 @@ pmap_enter_l3c(pmap_t pmap, vm_offset_t va, pt_entry_t l3e, u_int flags,
/*
* If the L3 PTP is not resident, we attempt to create it here.
*/
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
/*
* Were we given the correct L3 PTP? If so, we can simply
* increment its ref count.
@@ -6224,7 +6224,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* In the case that a page table page is not
* resident, we are creating it here.
*/
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
vm_pindex_t l2pindex;
/*
@@ -6310,7 +6310,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
l3_val |= ATTR_S1_XN;
- if (!ADDR_IS_KERNEL(va))
+ if (ADDR_IS_USER(va))
l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
else
l3_val |= ATTR_S1_UXN;
@@ -8528,7 +8528,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
* region and early kernel memory are the only parts of the
* kernel address space that must be handled here.
*/
- KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va) ||
+ KASSERT(ADDR_IS_USER(va) || VIRT_IN_DMAP(va) ||
(va >= VM_MIN_KERNEL_ADDRESS && va < kernel_vm_end),
("pmap_demote_l2: No saved mpte for va %#lx", va));
@@ -8555,7 +8555,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
}
ml3->pindex = pmap_l2_pindex(va);
- if (!ADDR_IS_KERNEL(va)) {
+ if (ADDR_IS_USER(va)) {
ml3->ref_count = NL3PG;
pmap_resident_count_inc(pmap, 1);
}
@@ -9113,7 +9113,7 @@ pmap_init_cnp(void *dummy __unused)
SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL);
static bool
-pmap_activate_int(pmap_t pmap)
+pmap_activate_int(struct thread *td, pmap_t pmap)
{
struct asid_set *set;
int epoch;
@@ -9152,6 +9152,15 @@ pmap_activate_int(pmap_t pmap)
pmap_alloc_asid(pmap);
if (pmap->pm_stage == PM_STAGE1) {
+ uint64_t new_tcr, tcr;
+
+ new_tcr = td->td_proc->p_md.md_tcr;
+ tcr = READ_SPECIALREG(tcr_el1);
+ if ((tcr & MD_TCR_FIELDS) != new_tcr) {
+ tcr &= ~MD_TCR_FIELDS;
+ tcr |= new_tcr;
+ WRITE_SPECIALREG(tcr_el1, tcr);
+ }
set_ttbr0(pmap_to_ttbr0(pmap));
if (PCPU_GET(bcast_tlbi_workaround) != 0)
invalidate_local_icache();
@@ -9165,7 +9174,7 @@ pmap_activate_vm(pmap_t pmap)
PMAP_ASSERT_STAGE2(pmap);
- (void)pmap_activate_int(pmap);
+ (void)pmap_activate_int(NULL, pmap);
}
void
@@ -9176,7 +9185,7 @@ pmap_activate(struct thread *td)
pmap = vmspace_pmap(td->td_proc->p_vmspace);
PMAP_ASSERT_STAGE1(pmap);
critical_enter();
- (void)pmap_activate_int(pmap);
+ (void)pmap_activate_int(td, pmap);
critical_exit();
}
@@ -9202,7 +9211,7 @@ pmap_switch(struct thread *new)
* to a user process.
*/
- if (pmap_activate_int(vmspace_pmap(new->td_proc->p_vmspace))) {
+ if (pmap_activate_int(new, vmspace_pmap(new->td_proc->p_vmspace))) {
/*
* Stop userspace from training the branch predictor against
* other processes. This will call into a CPU specific
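
pmap_activate_int() now takes the thread so it can program the process's TCR bits before installing the new TTBR0. The td argument may be NULL only on the stage 2 path (pmap_activate_vm()), which never reaches the stage 1 block, so the dereference is safe. In outline, reusing the hypothetical update_md_tcr_fields() sketch from the efirt_machdep.c notes above:

	if (pmap->pm_stage == PM_STAGE1) {
		/* td is non-NULL on every stage 1 path (activate/switch). */
		update_md_tcr_fields(td->td_proc->p_md.md_tcr);
		set_ttbr0(pmap_to_ttbr0(pmap));
		if (PCPU_GET(bcast_tlbi_workaround) != 0)
			invalidate_local_icache();
	}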
diff --git a/sys/arm64/arm64/ptrauth.c b/sys/arm64/arm64/ptrauth.c
index 767b7e115479..dbe0c69b8d60 100644
--- a/sys/arm64/arm64/ptrauth.c
+++ b/sys/arm64/arm64/ptrauth.c
@@ -149,6 +149,10 @@ ptrauth_enable(const struct cpu_feat *feat __unused,
enable_ptrauth = true;
elf64_addr_mask.code |= PAC_ADDR_MASK;
elf64_addr_mask.data |= PAC_ADDR_MASK;
+#ifdef COMPAT_FREEBSD14
+ elf64_addr_mask_14.code |= PAC_ADDR_MASK_14;
+ elf64_addr_mask_14.data |= PAC_ADDR_MASK_14;
+#endif
}
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
index 2d067c7f7730..bf6fc931e4b0 100644
--- a/sys/arm64/arm64/support.S
+++ b/sys/arm64/arm64/support.S
@@ -39,8 +39,15 @@
#include "assym.inc"
.macro check_user_access user_arg, limit, bad_addr_func
+ /*
+ * TBI is enabled from 15.0. Clear the top byte of the userspace
+ * address before checking whether it's within the given limit.
+ * The later load/store instructions will fault if TBI is disabled
+ * for the current process.
+ */
+ and x6, x\user_arg, #(~TBI_ADDR_MASK)
ldr x7, =(\limit)
- cmp x\user_arg, x7
+ cmp x6, x7
b.cs \bad_addr_func
.endm
diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
index 7b6010a5f51f..a461fded929c 100644
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -37,6 +37,8 @@
#include <machine/asm.h>
#include <machine/armreg.h>
+#include <machine/proc.h>
+
.macro clear_step_flag pcbflags, tmp
tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
mrs \tmp, mdscr_el1
@@ -239,6 +241,16 @@ ENTRY(fork_trampoline)
msr daifset, #(DAIF_D | DAIF_INTR)
ldr x0, [x18, #PC_CURTHREAD]
+
+ /* Set the per-process tcr_el1 fields */
+ ldr x10, [x0, #TD_PROC]
+ ldr x10, [x10, #P_MD_TCR]
+ mrs x11, tcr_el1
+ and x11, x11, #(~MD_TCR_FIELDS)
+ orr x11, x11, x10
+ msr tcr_el1, x11
+ /* No isb as the eret below is the context-synchronising event */
+
bl ptrauth_enter_el0
/* Restore sp, lr, elr, and spsr */
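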
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
index 38a126ff602f..0134feb65b6a 100644
--- a/sys/arm64/arm64/vm_machdep.c
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -120,6 +120,9 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
td2->td_md.md_spinlock_count = 1;
td2->td_md.md_saved_daif = PSR_DAIF_DEFAULT;
+ /* Copy the TCR_EL1 value */
+ td2->td_proc->p_md.md_tcr = td1->td_proc->p_md.md_tcr;
+
#if defined(PERTHREAD_SSP)
/* Set the new canary */
arc4random_buf(&td2->td_md.md_canary, sizeof(td2->td_md.md_canary));
diff --git a/sys/arm64/conf/std.dev b/sys/arm64/conf/std.dev
index c5c364ffda04..719f272426dd 100644
--- a/sys/arm64/conf/std.dev
+++ b/sys/arm64/conf/std.dev
@@ -115,6 +115,7 @@ device mmcsd # mmc/sd flash cards
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# Firmware
device mmio_sram # Generic on-chip SRAM
diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h
index 38b7f57f7853..500f35c48787 100644
--- a/sys/arm64/include/armreg.h
+++ b/sys/arm64/include/armreg.h
@@ -2608,6 +2608,26 @@
#define SCTLR_EnALS (UL(0x1) << 56)
#define SCTLR_EPAN (UL(0x1) << 57)
+#define SCTLR_MMU_OFF \
+ (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
+#define SCTLR_MMU_ON \
+ (SCTLR_MMU_OFF | \
+ SCTLR_BT1 | \
+ SCTLR_BT0 | \
+ SCTLR_UCI | \
+ SCTLR_SPAN | \
+ SCTLR_nTWE | \
+ SCTLR_nTWI | \
+ SCTLR_UCT | \
+ SCTLR_DZE | \
+ SCTLR_I | \
+ SCTLR_SED | \
+ SCTLR_CP15BEN | \
+ SCTLR_SA0 | \
+ SCTLR_SA | \
+ SCTLR_C | \
+ SCTLR_M)
+
/* SCTLR_EL12 */
#define SCTLR_EL12_REG MRS_REG_ALT_NAME(SCTLR_EL12)
#define SCTLR_EL12_op0 3
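
The ad-hoc INIT_SCTLR_EL1 macro and the sctlr_set/sctlr_clear literals from locore.S are replaced by these two named states. Because SCTLR_MMU_ON is defined as SCTLR_MMU_OFF plus the enable bits, the reset-state bits can never drift apart again; an illustrative (not committed) compile-time check of that relationship:

/*
 * Illustrative compile-time check: every bit in the MMU-off state
 * must survive into the MMU-on state.
 */
_Static_assert((SCTLR_MMU_ON & SCTLR_MMU_OFF) == SCTLR_MMU_OFF,
    "SCTLR_MMU_ON must be a superset of SCTLR_MMU_OFF");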
diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h
index 935e3754bf25..59cda36f275e 100644
--- a/sys/arm64/include/cpu.h
+++ b/sys/arm64/include/cpu.h
@@ -226,6 +226,9 @@ extern uint64_t __cpu_affinity[];
struct arm64_addr_mask;
extern struct arm64_addr_mask elf64_addr_mask;
+#ifdef COMPAT_FREEBSD14
+extern struct arm64_addr_mask elf64_addr_mask_14;
+#endif
typedef void (*cpu_reset_hook_t)(void);
extern cpu_reset_hook_t cpu_reset_hook;
diff --git a/sys/arm64/include/elf.h b/sys/arm64/include/elf.h
index d6328c143585..81ee7392f866 100644
--- a/sys/arm64/include/elf.h
+++ b/sys/arm64/include/elf.h
@@ -93,6 +93,9 @@ __ElfType(Auxinfo);
#define ET_DYN_LOAD_ADDR 0x100000
#endif
+/* First __FreeBSD_version that supports Top Byte Ignore (TBI) */
+#define TBI_VERSION 1500058
+
/* HWCAP */
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
diff --git a/sys/arm64/include/proc.h b/sys/arm64/include/proc.h
index dc2fa2df654d..184743d4cc80 100644
--- a/sys/arm64/include/proc.h
+++ b/sys/arm64/include/proc.h
@@ -35,6 +35,7 @@
#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_
+#ifndef LOCORE
struct ptrauth_key {
uint64_t pa_key_lo;
uint64_t pa_key_hi;
@@ -73,8 +74,12 @@ struct mdthread {
};
struct mdproc {
- long md_dummy;
+ uint64_t md_tcr; /* TCR_EL1 fields to update */
};
+#endif /* !LOCORE */
+
+/* Fields that can be set in md_tcr */
+#define MD_TCR_FIELDS TCR_TBI0
#define KINFO_PROC_SIZE 1088
#define KINFO_PROC32_SIZE 816
diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
index 1d783cdacb0d..73b5b4a09591 100644
--- a/sys/arm64/include/vmm.h
+++ b/sys/arm64/include/vmm.h
@@ -89,6 +89,7 @@ enum vm_reg_name {
VM_REG_GUEST_TTBR1_EL1,
VM_REG_GUEST_TCR_EL1,
VM_REG_GUEST_TCR2_EL1,
+ VM_REG_GUEST_MPIDR_EL1,
VM_REG_LAST
};
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index 349849845e73..781602306436 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -209,10 +209,16 @@
#define KMSAN_ORIG_MAX_ADDRESS (0xffff028000000000UL)
/* The address bits that hold a pointer authentication code */
-#define PAC_ADDR_MASK (0xff7f000000000000UL)
+#define PAC_ADDR_MASK (0x007f000000000000UL)
+#define PAC_ADDR_MASK_14 (0xff7f000000000000UL)
+
+/* The top-byte ignore address bits */
+#define TBI_ADDR_MASK 0xff00000000000000UL
/* If true addr is in the kernel address space */
#define ADDR_IS_KERNEL(addr) (((addr) & (1ul << 55)) == (1ul << 55))
+/* If true addr is in the user address space */
+#define ADDR_IS_USER(addr) (((addr) & (1ul << 55)) == 0)
/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define ADDR_IS_CANONICAL(addr) \
(((addr) & 0xffff000000000000UL) == 0 || \
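
The mask split is easiest to see on a concrete tagged pointer: with TBI the tag lives in bits 63:56 and the PAC (when pointer authentication is enabled) in bits 54:48, which is why PAC_ADDR_MASK shrank to 0x007f... while the FreeBSD 14 value keeps the top byte. Bit 55 still selects the half of the address space, so ADDR_IS_USER/ADDR_IS_KERNEL work on tagged pointers. A worked example using the values from this diff:

#include <stdint.h>

#define	TBI_ADDR_MASK	 0xff00000000000000UL
#define	PAC_ADDR_MASK	 0x007f000000000000UL	/* TBI-aware: bits 54:48 */
#define	PAC_ADDR_MASK_14 0xff7f000000000000UL	/* FreeBSD 14: top byte too */

int
main(void)
{
	uint64_t tagged = 0x2a00001234567890UL;	/* tag 0x2a in the top byte */
	uint64_t canonical;

	/* Bit 55 is clear, so this is a user address despite the tag. */
	canonical = tagged & ~(TBI_ADDR_MASK | PAC_ADDR_MASK);
	/* canonical == 0x0000001234567890: what the MMU actually translates */
	return (canonical != 0x0000001234567890UL);
}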
diff --git a/sys/arm64/rockchip/rk_gpio.c b/sys/arm64/rockchip/rk_gpio.c
index 847bc7394dd0..61614f532634 100644
--- a/sys/arm64/rockchip/rk_gpio.c
+++ b/sys/arm64/rockchip/rk_gpio.c
@@ -371,12 +371,13 @@ rk_gpio_attach(device_t dev)
sc->swporta_ddr = rk_gpio_read_4(sc, RK_GPIO_SWPORTA_DDR);
RK_GPIO_UNLOCK(sc);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
rk_gpio_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm64/rockchip/rk_grf_gpio.c b/sys/arm64/rockchip/rk_grf_gpio.c
index 6818bd85bb95..6ac419889614 100644
--- a/sys/arm64/rockchip/rk_grf_gpio.c
+++ b/sys/arm64/rockchip/rk_grf_gpio.c
@@ -181,11 +181,12 @@ rk_grf_gpio_attach(device_t dev)
return (ENXIO);
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/arm64/vmm/vmm_arm64.c b/sys/arm64/vmm/vmm_arm64.c
index de2425aae0a1..e293c99a6646 100644
--- a/sys/arm64/vmm/vmm_arm64.c
+++ b/sys/arm64/vmm/vmm_arm64.c
@@ -1251,6 +1251,8 @@ hypctx_regptr(struct hypctx *hypctx, int reg)
return (&hypctx->tcr_el1);
case VM_REG_GUEST_TCR2_EL1:
return (&hypctx->tcr2_el1);
+ case VM_REG_GUEST_MPIDR_EL1:
+ return (&hypctx->vmpidr_el2);
default:
break;
}