Diffstat (limited to 'sys/arm64/arm64/efirt_machdep.c')
-rw-r--r--	sys/arm64/arm64/efirt_machdep.c	33
1 file changed, 28 insertions, 5 deletions
diff --git a/sys/arm64/arm64/efirt_machdep.c b/sys/arm64/arm64/efirt_machdep.c
index 0f46e44f5d6a..bde0d4f784dc 100644
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -106,7 +106,8 @@ efi_1t1_l3(vm_offset_t va)
 	if (*l0 == 0) {
 		m = efi_1t1_page();
 		mphys = VM_PAGE_TO_PHYS(m);
-		*l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
+		*l0 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+		    TATTR_AP_TABLE_NO_EL0 | L0_TABLE;
 	} else {
 		mphys = PTE_TO_PHYS(*l0);
 	}
@@ -117,7 +118,8 @@ efi_1t1_l3(vm_offset_t va)
 	if (*l1 == 0) {
 		m = efi_1t1_page();
 		mphys = VM_PAGE_TO_PHYS(m);
-		*l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
+		*l1 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+		    TATTR_AP_TABLE_NO_EL0 | L1_TABLE;
 	} else {
 		mphys = PTE_TO_PHYS(*l1);
 	}
@@ -128,7 +130,8 @@ efi_1t1_l3(vm_offset_t va)
 	if (*l2 == 0) {
 		m = efi_1t1_page();
 		mphys = VM_PAGE_TO_PHYS(m);
-		*l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
+		*l2 = PHYS_TO_PTE(mphys) | TATTR_UXN_TABLE |
+		    TATTR_AP_TABLE_NO_EL0 | L2_TABLE;
 	} else {
 		mphys = PTE_TO_PHYS(*l2);
 	}
@@ -218,8 +221,9 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
 			    p->md_phys, mode, p->md_pages);
 		}
 
-		l3_attr = ATTR_AF | pmap_sh_attr | ATTR_S1_IDX(mode) |
-		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
+		l3_attr = ATTR_S1_UXN | ATTR_AF | pmap_sh_attr |
+		    ATTR_S1_IDX(mode) | ATTR_S1_AP(ATTR_S1_AP_RW) |
+		    ATTR_S1_nG | L3_PAGE;
 		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
 			l3_attr |= ATTR_S1_XN;
 
@@ -241,6 +245,7 @@ fail:
 int
 efi_arch_enter(void)
 {
+	uint64_t tcr;
 
 	CRITICAL_ASSERT(curthread);
 	curthread->td_md.md_efirt_dis_pf = vm_fault_disable_pagefaults();
@@ -249,7 +254,17 @@ efi_arch_enter(void)
 	 * Temporarily switch to EFI's page table. However, we leave curpmap
 	 * unchanged in order to prevent its ASID from being reclaimed before
 	 * we switch back to its page table in efi_arch_leave().
+	 *
+	 * UEFI doesn't care about TBI, so enable it. It's more likely
+	 * userspace will have TBI on as it's only disabled for backwards
+	 * compatibility.
 	 */
+	tcr = READ_SPECIALREG(tcr_el1);
+	if ((tcr & MD_TCR_FIELDS) != TCR_TBI0) {
+		tcr &= ~MD_TCR_FIELDS;
+		tcr |= TCR_TBI0;
+		WRITE_SPECIALREG(tcr_el1, tcr);
+	}
 	set_ttbr0(efi_ttbr0);
 	if (PCPU_GET(bcast_tlbi_workaround) != 0)
 		invalidate_local_icache();
@@ -260,6 +275,7 @@ efi_arch_enter(void)
 void
 efi_arch_leave(void)
 {
+	uint64_t proc_tcr, tcr;
 
 	/*
 	 * Restore the pcpu pointer. Some UEFI implementations trash it and
@@ -271,6 +287,13 @@ efi_arch_leave(void)
 	__asm __volatile(
 	    "mrs x18, tpidr_el1 \n"
 	);
+	proc_tcr = curthread->td_proc->p_md.md_tcr;
+	tcr = READ_SPECIALREG(tcr_el1);
+	if ((tcr & MD_TCR_FIELDS) != proc_tcr) {
+		tcr &= ~MD_TCR_FIELDS;
+		tcr |= proc_tcr;
+		WRITE_SPECIALREG(tcr_el1, tcr);
+	}
 	set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
 	if (PCPU_GET(bcast_tlbi_workaround) != 0)
 		invalidate_local_icache();
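
For context, the sketch below is an illustrative user-space model (not the kernel code above) of the read-modify-write pattern the new efi_arch_enter()/efi_arch_leave() code applies to TCR_EL1: force TBI0 on around the EFI runtime call, then restore the calling process's saved TCR fields. TBI0 and TBI1 are bits 37 and 38 of TCR_EL1, but the MD_TCR_FIELDS mask is simplified here to just those two bits, and the register is modeled as a plain variable instead of being accessed with READ_SPECIALREG/WRITE_SPECIALREG.

/*
 * Illustrative model, not kernel code: how the per-process TCR_EL1
 * fields are swapped for a TBI0-enabled value around an EFI runtime
 * call and then restored.  Constants are simplified stand-ins; in the
 * kernel they come from machine/armreg.h and machine/proc.h.
 */
#include <stdint.h>
#include <stdio.h>

#define	TCR_TBI0	(UINT64_C(1) << 37)	/* top-byte ignore, TTBR0 */
#define	TCR_TBI1	(UINT64_C(1) << 38)	/* top-byte ignore, TTBR1 */
#define	MD_TCR_FIELDS	(TCR_TBI0 | TCR_TBI1)	/* simplified per-process mask */

static uint64_t tcr_el1;	/* stands in for the real system register */

static void
efi_enter_tcr(void)
{
	uint64_t tcr = tcr_el1;

	/* UEFI ignores top-byte tags, so run it with TBI0 enabled. */
	if ((tcr & MD_TCR_FIELDS) != TCR_TBI0) {
		tcr &= ~MD_TCR_FIELDS;
		tcr |= TCR_TBI0;
		tcr_el1 = tcr;
	}
}

static void
efi_leave_tcr(uint64_t proc_tcr)
{
	uint64_t tcr = tcr_el1;

	/* Put back whatever TCR fields the interrupted process had. */
	if ((tcr & MD_TCR_FIELDS) != proc_tcr) {
		tcr &= ~MD_TCR_FIELDS;
		tcr |= proc_tcr;
		tcr_el1 = tcr;
	}
}

int
main(void)
{
	uint64_t proc_tcr = 0;	/* e.g. a process running with TBI disabled */

	tcr_el1 = proc_tcr;
	efi_enter_tcr();
	printf("during EFI call: TBI0 %s\n",
	    (tcr_el1 & TCR_TBI0) ? "on" : "off");
	efi_leave_tcr(proc_tcr);
	printf("after EFI call:  TBI0 %s\n",
	    (tcr_el1 & TCR_TBI0) ? "on" : "off");
	return (0);
}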
