Diffstat (limited to 'sys/arm64/arm64/pmap.c')
-rw-r--r--  sys/arm64/arm64/pmap.c  266
1 file changed, 266 insertions(+), 0 deletions(-)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 17abce473c17..b62673f999e6 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -108,6 +108,7 @@ __FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include <sys/param.h>
+#include <sys/asan.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/systm.h>
@@ -146,6 +147,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
+#include <machine/asan.h>
#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
@@ -190,6 +192,9 @@ __FBSDID("$FreeBSD$");
#define pmap_l1_pindex(v) (NUL2E + ((v) >> L1_SHIFT))
#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
+#define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | \
+ ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+
struct pmap_large_md_page {
struct rwlock pv_lock;
struct md_page pv_page;
@@ -1211,6 +1216,54 @@ pmap_bootstrap_l3(vm_offset_t va)
pmap_bootstrap_l2_table(&bs_state);
}
+#ifdef KASAN
+static void
+pmap_bootstrap_allocate_kasan_l2(vm_paddr_t start_pa, vm_paddr_t end_pa,
+ vm_offset_t *start_va, int *nkasan_l2)
+{
+ int i;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ pd_entry_t *l2;
+
+ va = *start_va;
+ pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);
+ l2 = pmap_l2(kernel_pmap, va);
+
+ for (i = 0; pa >= start_pa && i < *nkasan_l2;
+ i++, va += L2_SIZE, pa -= L2_SIZE, l2++) {
+ /*
+ * KASAN stack checking results in us having already allocated
+ * part of our shadow map, so we can just skip those segments.
+ */
+ if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) {
+ pa += L2_SIZE;
+ continue;
+ }
+
+ pmap_store(l2, (pa & ~Ln_TABLE_MASK) | PMAP_SAN_PTE_BITS |
+ L2_BLOCK);
+ }
+
+ /*
+	 * We ended the allocation early because of the start_pa constraint,
+	 * rather than because we allocated everything.  Adjust back up to
+	 * start_pa and remove the invalid L2 block from our accounting.
+ */
+ if (pa < start_pa) {
+ va += L2_SIZE;
+ i--;
+ pa = start_pa;
+ }
+
+ bzero((void *)PHYS_TO_DMAP(pa), i * L2_SIZE);
+ physmem_exclude_region(pa, i * L2_SIZE, EXFLAG_NOALLOC);
+
+ *nkasan_l2 -= i;
+ *start_va = va;
+}
+#endif
+
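
The loop in pmap_bootstrap_allocate_kasan_l2() above hands out physical memory from the top of the chosen region downward while the shadow VA grows upward, starting at the highest L2-aligned block that fits entirely below end_pa. A standalone user-space sketch of that alignment arithmetic, with made-up addresses (only the 2 MiB L2_SIZE reflects arm64 with 4 KB pages; the region bounds are assumptions for illustration):

#include <stdio.h>

#define L2_SIZE          (2UL << 20)          /* 2 MiB L2 block (arm64, 4 KB granule) */
#define rounddown2(x, y) ((x) & ~((y) - 1))   /* y must be a power of two */

int
main(void)
{
	unsigned long start_pa = 0x80000000UL;  /* assumed region start */
	unsigned long end_pa = 0x80700000UL;    /* assumed region end, 7 MiB later */
	unsigned long pa;

	/*
	 * Highest L2 block lying entirely below end_pa:
	 * rounddown2(0x80700000 - 0x200000, 0x200000) == 0x80400000.
	 */
	pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);

	/* The walk then steps down: 0x80400000, 0x80200000, 0x80000000. */
	for (; pa >= start_pa; pa -= L2_SIZE)
		printf("back a 2 MiB shadow block with pa %#lx\n", pa);
	return (0);
}
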
/*
* Bootstrap the system enough to run with virtual memory.
*/
@@ -1312,6 +1365,68 @@ pmap_bootstrap(vm_paddr_t kernstart, vm_size_t kernlen)
cpu_tlb_flushID();
}
+#if defined(KASAN)
+/*
+ * Finish constructing the initial shadow map:
+ * - Count how many pages from KERNBASE to virtual_avail (scaled for
+ * shadow map)
+ * - Map that entire range using L2 superpages.
+ */
+void
+pmap_bootstrap_san(vm_paddr_t kernstart)
+{
+ vm_offset_t va;
+ int i, shadow_npages, nkasan_l2;
+
+ /*
+	 * Rebuild physmap one more time; we may have excluded more regions from
+ * allocation since pmap_bootstrap().
+ */
+ bzero(physmap, sizeof(physmap));
+ physmap_idx = physmem_avail(physmap, nitems(physmap));
+ physmap_idx /= 2;
+
+ shadow_npages = (virtual_avail - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
+ shadow_npages = howmany(shadow_npages, KASAN_SHADOW_SCALE);
+ nkasan_l2 = howmany(shadow_npages, Ln_ENTRIES);
+
+ /* Map the valid KVA up to this point. */
+ va = KASAN_MIN_ADDRESS;
+
+ /*
+	 * Find a slot in the physmap large enough for what we need.  We try to put
+	 * the shadow map as high up as we can to avoid depleting the lower 4GB, in
+	 * case it's needed for, e.g., an xhci controller that can only do 32-bit DMA.
+ */
+ for (i = (physmap_idx * 2) - 2; i >= 0 && nkasan_l2 > 0; i -= 2) {
+ vm_paddr_t plow, phigh;
+
+ /* L2 mappings must be backed by memory that is L2-aligned */
+ plow = roundup2(physmap[i], L2_SIZE);
+ phigh = physmap[i + 1];
+ if (plow >= phigh)
+ continue;
+ if (kernstart >= plow && kernstart < phigh)
+ phigh = kernstart;
+ if (phigh - plow >= L2_SIZE)
+ pmap_bootstrap_allocate_kasan_l2(plow, phigh, &va,
+ &nkasan_l2);
+ }
+
+ if (nkasan_l2 != 0)
+ panic("Could not find phys region for shadow map");
+
+ /*
+ * Done. We should now have a valid shadow address mapped for all KVA
+ * that has been mapped so far, i.e., KERNBASE to virtual_avail. Thus,
+ * shadow accesses by the kasan(9) runtime will succeed for this range.
+ * When the kernel virtual address range is later expanded, as will
+ * happen in vm_mem_init(), the shadow map will be grown as well. This
+ * is handled by pmap_san_enter().
+ */
+}
+#endif
+
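
To make the sizing in pmap_bootstrap_san() above concrete, here is the same arithmetic as a standalone sketch. It assumes 4 KB pages, 512-entry page tables, and a KASAN shadow scale of 8 (one shadow byte per eight bytes of KVA); the 1 GiB of early KVA is made up for the example.

#include <stdio.h>

#define PAGE_SIZE           4096UL
#define Ln_ENTRIES          512UL        /* PTEs per page-table page */
#define KASAN_SHADOW_SCALE  8UL          /* assumed 1:8 shadow granule */
#define L2_SIZE             (2UL << 20)
#define howmany(x, y)       (((x) + ((y) - 1)) / (y))

int
main(void)
{
	/* Assume 1 GiB of KVA is mapped by the time pmap_bootstrap_san() runs. */
	unsigned long kva_pages = (1UL << 30) / PAGE_SIZE;              /* 262144 */
	unsigned long shadow_npages = howmany(kva_pages, KASAN_SHADOW_SCALE);
	unsigned long nkasan_l2 = howmany(shadow_npages, Ln_ENTRIES);

	/* Prints: 32768 shadow pages -> 64 L2 blocks (128 MiB of shadow). */
	printf("%lu shadow pages -> %lu L2 blocks (%lu MiB of shadow)\n",
	    shadow_npages, nkasan_l2, nkasan_l2 * L2_SIZE >> 20);
	return (0);
}
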
/*
* Initialize a vm_page's machine-dependent fields.
*/
@@ -2580,6 +2695,8 @@ pmap_growkernel(vm_offset_t addr)
addr = roundup2(addr, L2_SIZE);
if (addr - 1 >= vm_map_max(kernel_map))
addr = vm_map_max(kernel_map);
+ if (kernel_vm_end < addr)
+ kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
while (kernel_vm_end < addr) {
l0 = pmap_l0(kernel_pmap, kernel_vm_end);
KASSERT(pmap_load(l0) != 0,
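
The kasan_shadow_map() call added above extends the shadow for [kernel_vm_end, addr) before kernel_vm_end itself is advanced, so instrumented code touching the new KVA always finds backing shadow. How much shadow that takes follows from the usual linear 1:8 translation, sketched below; the two base addresses are placeholders for illustration, not the real arm64 layout.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT  3                      /* 1 shadow byte per 8 bytes of KVA */
#define VM_MIN_KERNEL_ADDRESS     0xffff000000000000UL   /* placeholder */
#define KASAN_MIN_ADDRESS         0xffff030000000000UL   /* placeholder */

/* Illustrative linear shadow translation, not copied from the machine headers. */
static unsigned long
addr_to_shad(unsigned long addr)
{
	return (KASAN_MIN_ADDRESS +
	    ((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT));
}

int
main(void)
{
	unsigned long kernel_vm_end = VM_MIN_KERNEL_ADDRESS + (1UL << 30);
	unsigned long addr = kernel_vm_end + (8UL << 20);   /* grow KVA by 8 MiB */

	/* 8 MiB of new KVA needs only 1 MiB of new shadow. */
	printf("shadow to map: %#lx-%#lx (%lu KiB)\n",
	    addr_to_shad(kernel_vm_end), addr_to_shad(addr),
	    (addr_to_shad(addr) - addr_to_shad(kernel_vm_end)) >> 10);
	return (0);
}
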
@@ -7556,6 +7673,151 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
}
+#if defined(KASAN)
+static vm_paddr_t pmap_san_early_kernstart;
+static pd_entry_t *pmap_san_early_l2;
+
+void __nosanitizeaddress
+pmap_san_bootstrap(struct arm64_bootparams *abp)
+{
+
+ pmap_san_early_kernstart = KERNBASE - abp->kern_delta;
+ kasan_init_early(abp->kern_stack, KSTACK_PAGES * PAGE_SIZE);
+}
+
+#define SAN_BOOTSTRAP_L2_SIZE (1 * L2_SIZE)
+#define SAN_BOOTSTRAP_SIZE (2 * PAGE_SIZE)
+static vm_offset_t __nosanitizeaddress
+pmap_san_enter_bootstrap_alloc_l2(void)
+{
+ static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE);
+ static size_t offset = 0;
+ vm_offset_t addr;
+
+ if (offset + L2_SIZE > sizeof(bootstrap_data)) {
+ panic("%s: out of memory for the bootstrap shadow map L2 entries",
+ __func__);
+ }
+
+ addr = (uintptr_t)&bootstrap_data[offset];
+ offset += L2_SIZE;
+ return (addr);
+}
+
+/*
+ * Page-table pages for the early shadow map (L1 and L2); maybe L3 entries later?
+ */
+static vm_offset_t __nosanitizeaddress
+pmap_san_enter_bootstrap_alloc_pages(int npages)
+{
+ static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE);
+ static size_t offset = 0;
+ vm_offset_t addr;
+
+ if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) {
+ panic("%s: out of memory for the bootstrap shadow map",
+ __func__);
+ }
+
+ addr = (uintptr_t)&bootstrap_data[offset];
+ offset += (npages * PAGE_SIZE);
+ return (addr);
+}
+
+static void __nosanitizeaddress
+pmap_san_enter_bootstrap(void)
+{
+ vm_offset_t freemempos;
+
+ /* L1, L2 */
+ freemempos = pmap_san_enter_bootstrap_alloc_pages(2);
+ bs_state.freemempos = freemempos;
+ bs_state.va = KASAN_MIN_ADDRESS;
+ pmap_bootstrap_l1_table(&bs_state);
+ pmap_san_early_l2 = bs_state.l2;
+}
+
+static vm_page_t
+pmap_san_enter_alloc_l3(void)
+{
+ vm_page_t m;
+
+ m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (m == NULL)
+ panic("%s: no memory to grow shadow map", __func__);
+ return (m);
+}
+
+static vm_page_t
+pmap_san_enter_alloc_l2(void)
+{
+ return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
+ Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT));
+}
+
+void __nosanitizeaddress
+pmap_san_enter(vm_offset_t va)
+{
+ pd_entry_t *l1, *l2;
+ pt_entry_t *l3;
+ vm_page_t m;
+
+ if (virtual_avail == 0) {
+ vm_offset_t block;
+ int slot;
+ bool first;
+
+ /* Temporary shadow map prior to pmap_bootstrap(). */
+ first = pmap_san_early_l2 == NULL;
+ if (first)
+ pmap_san_enter_bootstrap();
+
+ l2 = pmap_san_early_l2;
+ slot = pmap_l2_index(va);
+
+ if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) {
+ MPASS(first);
+ block = pmap_san_enter_bootstrap_alloc_l2();
+ pmap_store(&l2[slot], pmap_early_vtophys(block) |
+ PMAP_SAN_PTE_BITS | L2_BLOCK);
+ dmb(ishst);
+ }
+
+ return;
+ }
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+ l1 = pmap_l1(kernel_pmap, va);
+ MPASS(l1 != NULL);
+ if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
+ m = pmap_san_enter_alloc_l3();
+ pmap_store(l1, (VM_PAGE_TO_PHYS(m) & ~Ln_TABLE_MASK) |
+ L1_TABLE);
+ }
+ l2 = pmap_l1_to_l2(l1, va);
+ if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
+ m = pmap_san_enter_alloc_l2();
+ if (m != NULL) {
+ pmap_store(l2, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS |
+ L2_BLOCK);
+ } else {
+ m = pmap_san_enter_alloc_l3();
+ pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+ }
+ dmb(ishst);
+ }
+ if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK)
+ return;
+ l3 = pmap_l2_to_l3(l2, va);
+ if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
+ return;
+ m = pmap_san_enter_alloc_l3();
+ pmap_store(l3, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
+ dmb(ishst);
+}
+#endif /* KASAN */
+
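
Once the kernel VM system is up (virtual_avail != 0), pmap_san_enter() above first tries to back new shadow with a physically contiguous 2 MiB L2 block and only falls back to 4 KiB L3 pages when that allocation fails. The coverage ratio behind that preference is easy to check, again assuming the usual 1:8 shadow scale (an assumption, not taken from this diff):

#include <stdio.h>

#define PAGE_SIZE           (4UL << 10)
#define L2_SIZE             (2UL << 20)
#define KASAN_SHADOW_SCALE  8UL    /* assumed 1:8 granule */

int
main(void)
{
	/* One 2 MiB shadow block describes 16 MiB of KVA ... */
	printf("shadow L2 block covers %lu MiB of KVA\n",
	    L2_SIZE * KASAN_SHADOW_SCALE >> 20);
	/* ... while a single 4 KiB shadow page covers only 32 KiB. */
	printf("shadow L3 page covers %lu KiB of KVA\n",
	    PAGE_SIZE * KASAN_SHADOW_SCALE >> 10);
	return (0);
}
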
/*
* Track a range of the kernel's virtual address space that is contiguous
* in various mapping attributes.
@@ -7724,6 +7986,10 @@ sysctl_kmaps(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "\nDirect map:\n");
else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
sbuf_printf(sb, "\nKernel map:\n");
+#ifdef KASAN
+ else if (i == pmap_l0_index(KASAN_MIN_ADDRESS))
+ sbuf_printf(sb, "\nKASAN shadow map:\n");
+#endif
l0e = kernel_pmap->pm_l0[i];
if ((l0e & ATTR_DESCR_VALID) == 0) {