author     Mark Johnston <markj@FreeBSD.org>  2021-08-10 20:25:39 +0000
committer  Mark Johnston <markj@FreeBSD.org>  2021-11-02 22:17:58 +0000
commit     36ca4b79b84870cd7da2fe59a359d2c299b54032 (patch)
tree       2c8172ac1e9d94495de730069f25eb5f41f05cd9
parent     5fdfe78b42d0995e3b6849c43cbe94eeb3c308d4 (diff)
amd64: Define KVA regions for KMSAN shadow maps
KMSAN requires two shadow maps, each one-to-one with the kernel map.
Allocate regions of the kernel's PML4 page for them.  Add functions to
create mappings in the shadow map regions; these will be used by the
KMSAN runtime.

Reviewed by:	alc, kib
Sponsored by:	The FreeBSD Foundation

(cherry picked from commit f95f780ea4e163ce9a0295a699f41f0a7e1591d4)
-rw-r--r--   sys/amd64/amd64/pmap.c        78
-rw-r--r--   sys/amd64/include/pmap.h      13
-rw-r--r--   sys/amd64/include/vmparam.h   13
3 files changed, 102 insertions(+), 2 deletions(-)
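
The "one-to-one with the kernel map" requirement exists because the KMSAN
runtime translates a kernel address into its shadow and origin addresses with
constant-offset arithmetic rather than a table lookup.  A minimal sketch of
that translation in terms of the constants added below; the helper names are
illustrative, not necessarily those used by the runtime:

/*
 * Sketch: translate a kernel map address to the corresponding
 * shadow/origin map addresses.  The shadow map tracks the
 * initialization state of each byte of kernel memory; the origin map
 * records where uninitialized data came from.  Both regions mirror
 * the 2TB kernel map, so a fixed offset from VM_MIN_KERNEL_ADDRESS
 * suffices.
 */
static inline vm_offset_t
kmsan_addr_to_shad(vm_offset_t addr)
{
	return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_SHAD_MIN_ADDRESS);
}

static inline vm_offset_t
kmsan_addr_to_orig(vm_offset_t addr)
{
	return (addr - VM_MIN_KERNEL_ADDRESS + KMSAN_ORIG_MIN_ADDRESS);
}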
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 353ee18a862a..d2a168ca7f70 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -11358,6 +11358,76 @@ pmap_kasan_enter(vm_offset_t va)
}
#endif
+#ifdef KMSAN
+static vm_page_t
+pmap_kmsan_enter_alloc_4k(void)
+{
+ vm_page_t m;
+
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (m == NULL)
+ panic("%s: no memory to grow shadow map", __func__);
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+ return (m);
+}
+
+static vm_page_t
+pmap_kmsan_enter_alloc_2m(void)
+{
+ vm_page_t m;
+
+ m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT);
+ if (m != NULL)
+ memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 0, NBPDR);
+ return (m);
+}
+
+/*
+ * Grow the shadow or origin maps by at least one 4KB page at the specified
+ * address. Use 2MB pages when possible.
+ */
+void
+pmap_kmsan_enter(vm_offset_t va)
+{
+ pdp_entry_t *pdpe;
+ pd_entry_t *pde;
+ pt_entry_t *pte;
+ vm_page_t m;
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+
+ pdpe = pmap_pdpe(kernel_pmap, va);
+ if ((*pdpe & X86_PG_V) == 0) {
+ m = pmap_kmsan_enter_alloc_4k();
+ *pdpe = (pdp_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+ X86_PG_V | pg_nx);
+ }
+ pde = pmap_pdpe_to_pde(pdpe, va);
+ if ((*pde & X86_PG_V) == 0) {
+ m = pmap_kmsan_enter_alloc_2m();
+ if (m != NULL) {
+ *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+ X86_PG_PS | X86_PG_V | X86_PG_A | X86_PG_M | pg_nx);
+ } else {
+ m = pmap_kmsan_enter_alloc_4k();
+ *pde = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW |
+ X86_PG_V | pg_nx);
+ }
+ }
+ if ((*pde & X86_PG_PS) != 0)
+ return;
+ pte = pmap_pde_to_pte(pde, va);
+ if ((*pte & X86_PG_V) != 0)
+ return;
+ m = pmap_kmsan_enter_alloc_4k();
+ *pte = (pt_entry_t)(VM_PAGE_TO_PHYS(m) | X86_PG_RW | X86_PG_V |
+ X86_PG_M | X86_PG_A | pg_nx);
+}
+#endif
+
/*
* Track a range of the kernel's virtual address space that is contiguous
* in various mapping attributes.
@@ -11540,6 +11610,14 @@ sysctl_kmaps(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "\nKASAN shadow map:\n");
break;
#endif
+#ifdef KMSAN
+ case KMSANSHADPML4I:
+ sbuf_printf(sb, "\nKMSAN shadow map:\n");
+ break;
+ case KMSANORIGPML4I:
+ sbuf_printf(sb, "\nKMSAN origin map:\n");
+ break;
+#endif
case KPML4BASE:
sbuf_printf(sb, "\nKernel map:\n");
break;
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index c5c1714f2f94..bd6a8c006813 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -202,6 +202,13 @@
#define NKASANPML4E ((NKPML4E + 7) / 8)
/*
+ * Number of PML4 slots for the KMSAN shadow and origin maps. These are
+ * one-to-one with the kernel map.
+ */
+#define NKMSANSHADPML4E NKPML4E
+#define NKMSANORIGPML4E NKPML4E
+
+/*
* We use the same numbering of the page table pages for 5-level and
* 4-level paging structures.
*/
@@ -251,6 +258,9 @@
#define KASANPML4I (DMPML4I - NKASANPML4E) /* Below the direct map */
+#define KMSANSHADPML4I (KPML4BASE - NKMSANSHADPML4E)
+#define KMSANORIGPML4I (DMPML4I - NKMSANORIGPML4E)
+
/* Large map: index of the first and max last pml4 entry */
#define LMSPML4I (PML4PML4I + 1)
#define LMEPML4I (KASANPML4I - 1)
@@ -521,6 +531,9 @@ vm_page_t pmap_page_alloc_below_4g(bool zeroed);
#ifdef KASAN
void pmap_kasan_enter(vm_offset_t);
#endif
+#ifdef KMSAN
+void pmap_kmsan_enter(vm_offset_t);
+#endif
#endif /* _KERNEL */
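
To see where these slots land, here is the index arithmetic worked out,
assuming the standard amd64 constants (NPML4EPG = 512 entries per PML4 page,
NKPML4E = 4, NDMPML4E = 8, hence KPML4BASE = 508 and DMPML4I = 496); each
PML4 slot covers 512GB:

KMSANSHADPML4I = KPML4BASE - NKMSANSHADPML4E = 508 - 4 = 504
    KV4ADDR(504, 0, 0, 0) = 0xfffffc0000000000 (2TB, directly below the kernel map)
KMSANORIGPML4I = DMPML4I - NKMSANORIGPML4E = 496 - 4 = 492
    KV4ADDR(492, 0, 0, 0) = 0xfffff60000000000 (2TB, directly below the direct map)

The origin map slots overlap the KASAN shadow region (KASANPML4I = 495); this
is harmless because KASAN and KMSAN are mutually exclusive kernel options,
which is why the layout comment in vmparam.h below marks both regions
optional.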
diff --git a/sys/amd64/include/vmparam.h b/sys/amd64/include/vmparam.h
index 88fd29b80be3..61d0dea54210 100644
--- a/sys/amd64/include/vmparam.h
+++ b/sys/amd64/include/vmparam.h
@@ -170,9 +170,10 @@
* 0xffff804020100fff - 0xffff807fffffffff unused
* 0xffff808000000000 - 0xffff847fffffffff large map (can be tuned up)
* 0xffff848000000000 - 0xfffff77fffffffff unused (large map extends there)
- * 0xfffff78000000000 - 0xfffff7ffffffffff 512GB KASAN shadow map
+ * 0xfffff60000000000 - 0xfffff7ffffffffff 2TB KMSAN origin map, optional
+ * 0xfffff78000000000 - 0xfffff7bfffffffff 512GB KASAN shadow map, optional
* 0xfffff80000000000 - 0xfffffbffffffffff 4TB direct map
- * 0xfffffc0000000000 - 0xfffffdffffffffff unused
+ * 0xfffffc0000000000 - 0xfffffdffffffffff 2TB KMSAN shadow map, optional
* 0xfffffe0000000000 - 0xffffffffffffffff 2TB kernel map
*
* Within the kernel map:
@@ -191,6 +192,14 @@
#define KASAN_MIN_ADDRESS KV4ADDR(KASANPML4I, 0, 0, 0)
#define KASAN_MAX_ADDRESS KV4ADDR(KASANPML4I + NKASANPML4E, 0, 0, 0)
+#define KMSAN_SHAD_MIN_ADDRESS KV4ADDR(KMSANSHADPML4I, 0, 0, 0)
+#define KMSAN_SHAD_MAX_ADDRESS KV4ADDR(KMSANSHADPML4I + NKMSANSHADPML4E, \
+ 0, 0, 0)
+
+#define KMSAN_ORIG_MIN_ADDRESS KV4ADDR(KMSANORIGPML4I, 0, 0, 0)
+#define KMSAN_ORIG_MAX_ADDRESS KV4ADDR(KMSANORIGPML4I + NKMSANORIGPML4E, \
+ 0, 0, 0)
+
#define LARGEMAP_MIN_ADDRESS KV4ADDR(LMSPML4I, 0, 0, 0)
#define LARGEMAP_MAX_ADDRESS KV4ADDR(LMEPML4I + 1, 0, 0, 0)
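
For context on how the new function is driven: the KMSAN runtime must back
every page of the shadow and origin regions corresponding to mapped kernel
KVA.  A minimal sketch of such a caller, using the hypothetical translation
helpers sketched above (kmsan_grow is an illustrative name, not the actual
runtime entry point):

/*
 * Back the shadow and origin pages covering [addr, addr + size).
 * pmap_kmsan_enter() asserts that the kernel map lock is held and
 * panics internally if page-table pages cannot be allocated, so a
 * real caller runs under vm_map_lock(kernel_map) and needs no error
 * handling here.
 */
static void
kmsan_grow(vm_offset_t addr, size_t size)
{
	vm_offset_t va;

	for (va = trunc_page(addr); va < addr + size; va += PAGE_SIZE) {
		pmap_kmsan_enter(kmsan_addr_to_shad(va));
		pmap_kmsan_enter(kmsan_addr_to_orig(va));
	}
}

Note that pmap_kmsan_enter() itself decides whether a 2MB superpage or a 4KB
page backs each mapping (falling back to 4KB pages when a contiguous run
cannot be allocated), so the caller only ever walks in 4KB steps.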