author     Mitchell Horne <mhorne@FreeBSD.org>	2020-02-12 14:06:02 +0000
committer  Mitchell Horne <mhorne@FreeBSD.org>	2020-02-12 14:06:02 +0000
commit     9ce0b407c2f37fd83b140cb93c582f445baa750d (patch)
tree       1900cad8afc59653458ed3be1c3f32c98b09cbc2 /sys/riscv/riscv
parent     ccfb8acd708bf179fae5b45631bb259f4de2fefe (diff)
Implement vm.pmap.kernel_maps for RISC-V
This is taken from the arm64 version, with the following simplifications:

- Our current pmap implementation uses a 3-level paging scheme
- The "mode" field has been omitted, since RISC-V PTEs don't encode
  typical mode attributes

Reviewed by:	markj
Differential Revision:	https://reviews.freebsd.org/D23594
Notes:
    svn path=/head/; revision=357821
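For context (illustrative, not part of the commit): each line the new sysctl
emits describes one contiguous range and, following the sbuf_printf() format
string in sysctl_kmaps_dump() below, shows the start and end addresses, the
mapping flags (w = writable, x = executable, u/s = user or supervisor,
g = global; the leading 'r' is printed unconditionally), and the number of
L1 (1 GiB), L2 (2 MiB), and L3 (4 KiB) pages backing the range. A
hypothetical line for a single 2 MiB kernel mapping might read:

    0xffffffc000000000-0xffffffc000200000 rwxsg 0 1 0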
Diffstat (limited to 'sys/riscv/riscv')
-rw-r--r--	sys/riscv/riscv/pmap.c	168
1 file changed, 168 insertions(+), 0 deletions(-)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 6e9c4cb77c39..89e0a25600dd 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -131,6 +131,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
+#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
@@ -4487,3 +4488,170 @@ pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l1, pd_entry_t **l2,
return (true);
}
+
+/*
+ * Track a range of the kernel's virtual address space that is contiguous
+ * in various mapping attributes.
+ */
+struct pmap_kernel_map_range {
+ vm_offset_t sva;
+ pt_entry_t attrs;
+ int l3pages;
+ int l2pages;
+ int l1pages;
+};
+
+static void
+sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
+ vm_offset_t eva)
+{
+
+ if (eva <= range->sva)
+ return;
+
+ sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %d %d %d\n",
+ range->sva, eva,
+ (range->attrs & PTE_W) == PTE_W ? 'w' : '-',
+ (range->attrs & PTE_X) == PTE_X ? 'x' : '-',
+ (range->attrs & PTE_U) == PTE_U ? 'u' : 's',
+ (range->attrs & PTE_G) == PTE_G ? 'g' : '-',
+ range->l1pages, range->l2pages, range->l3pages);
+
+ /* Reset to sentinel value. */
+ range->sva = 0xfffffffffffffffful;
+}
+
+/*
+ * Determine whether the attributes specified by a page table entry match those
+ * being tracked by the current range.
+ */
+static bool
+sysctl_kmaps_match(struct pmap_kernel_map_range *range, pt_entry_t attrs)
+{
+
+ return (range->attrs == attrs);
+}
+
+static void
+sysctl_kmaps_reinit(struct pmap_kernel_map_range *range, vm_offset_t va,
+ pt_entry_t attrs)
+{
+
+ memset(range, 0, sizeof(*range));
+ range->sva = va;
+ range->attrs = attrs;
+}
+
+/*
+ * Given a leaf PTE, derive the mapping's attributes. If they do not match
+ * those of the current run, dump the address range and its attributes, and
+ * begin a new run.
+ */
+static void
+sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
+ vm_offset_t va, pd_entry_t l1e, pd_entry_t l2e, pt_entry_t l3e)
+{
+ pt_entry_t attrs;
+
+ /* The PTE global bit is inherited by lower levels. */
+ attrs = l1e & PTE_G;
+ if ((l1e & PTE_RWX) != 0)
+ attrs |= l1e & (PTE_RWX | PTE_U);
+ else if (l2e != 0)
+ attrs |= l2e & PTE_G;
+ if ((l2e & PTE_RWX) != 0)
+ attrs |= l2e & (PTE_RWX | PTE_U);
+ else if (l3e != 0)
+ attrs |= l3e & (PTE_RWX | PTE_U | PTE_G);
+
+ if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
+ sysctl_kmaps_dump(sb, range, va);
+ sysctl_kmaps_reinit(range, va, attrs);
+ }
+}
+
+static int
+sysctl_kmaps(SYSCTL_HANDLER_ARGS)
+{
+ struct pmap_kernel_map_range range;
+ struct sbuf sbuf, *sb;
+ pd_entry_t l1e, *l2, l2e;
+ pt_entry_t *l3, l3e;
+ vm_offset_t sva;
+ vm_paddr_t pa;
+ int error, i, j, k;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
+ sb = &sbuf;
+ sbuf_new_for_sysctl(sb, NULL, PAGE_SIZE, req);
+
+ /* Sentinel value. */
+ range.sva = 0xfffffffffffffffful;
+
+ /*
+ * Iterate over the kernel page tables without holding the kernel pmap
+ * lock. Kernel page table pages are never freed, so at worst we will
+ * observe inconsistencies in the output.
+ */
+ sva = VM_MIN_KERNEL_ADDRESS;
+ for (i = pmap_l1_index(sva); i < Ln_ENTRIES; i++) {
+ if (i == pmap_l1_index(DMAP_MIN_ADDRESS))
+ sbuf_printf(sb, "\nDirect map:\n");
+ else if (i == pmap_l1_index(VM_MIN_KERNEL_ADDRESS))
+ sbuf_printf(sb, "\nKernel map:\n");
+
+ l1e = kernel_pmap->pm_l1[i];
+ if ((l1e & PTE_V) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ sva += L1_SIZE;
+ continue;
+ }
+ if ((l1e & PTE_RWX) != 0) {
+ sysctl_kmaps_check(sb, &range, sva, l1e, 0, 0);
+ range.l1pages++;
+ sva += L1_SIZE;
+ continue;
+ }
+ pa = PTE_TO_PHYS(l1e);
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (j = pmap_l2_index(sva); j < Ln_ENTRIES; j++) {
+ l2e = l2[j];
+ if ((l2e & PTE_V) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ sva += L2_SIZE;
+ continue;
+ }
+ if ((l2e & PTE_RWX) != 0) {
+ sysctl_kmaps_check(sb, &range, sva, l1e, l2e, 0);
+ range.l2pages++;
+ sva += L2_SIZE;
+ continue;
+ }
+ pa = PTE_TO_PHYS(l2e);
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);
+
+ for (k = pmap_l3_index(sva); k < Ln_ENTRIES; k++,
+ sva += L3_SIZE) {
+ l3e = l3[k];
+ if ((l3e & PTE_V) == 0) {
+ sysctl_kmaps_dump(sb, &range, sva);
+ continue;
+ }
+ sysctl_kmaps_check(sb, &range, sva,
+ l1e, l2e, l3e);
+ range.l3pages++;
+ }
+ }
+ }
+
+ error = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (error);
+}
+SYSCTL_OID(_vm_pmap, OID_AUTO, kernel_maps,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ NULL, 0, sysctl_kmaps, "A",
+ "Dump kernel address layout");