author     Bojan Novković <bojan.novkovic@fer.hr>    2023-11-08 10:20:06 +0000
committer  Mark Johnston <markj@FreeBSD.org>         2023-11-08 12:19:15 +0000
commit     d0941ed9b5c39d92d0aa75bc253506cb59a2e9a1
tree       77bc553033158b8f9b55271c75543b80d1236596
parent     7e5002e3d6038b69e23f6c1982caf20cd62139f7
riscv: Add a leaf PTP when pmap_enter(psind=1) creates a wired mapping
Let pmap_enter_l2() create wired mappings. In particular, allocate a
leaf PTP for use during demotion. This is the last pmap which requires
such a change ahead of reverting commit 64087fd7f372.

Reviewed by:	markj
Sponsored by:	Google, Inc. (GSoC 2023)
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D41633
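[Editor's note] For context, a hedged sketch of the kind of request this commit
services. pmap_enter()'s signature and the PMAP_ENTER_WIRED and
PMAP_ENTER_NOSLEEP flags are part of the real machine-independent pmap
interface, but the wrapper below is hypothetical, only compiles inside the
kernel tree, and is not part of this commit:

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Hypothetical caller: request a wired 2 MiB mapping.  psind == 1
 * selects an L2 leaf on riscv, and PMAP_ENTER_WIRED is what now makes
 * pmap_enter_l2() preallocate a leaf page-table page so that a later
 * demotion of the wired superpage cannot fail for lack of memory.
 */
static int
map_wired_superpage(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	return (pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_ENTER_WIRED | PMAP_ENTER_NOSLEEP, 1));
}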
-rw-r--r--   sys/riscv/riscv/pmap.c   31
1 file changed, 31 insertions, 0 deletions
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index f6923363c484..223bf0243964 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2685,6 +2685,8 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
             ("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
         if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
             NULL) {
+                KASSERT((oldl2 & PTE_SW_WIRED) == 0,
+                    ("pmap_demote_l2_locked: page table page for a wired mapping is missing"));
                 if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
                     (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
                     VM_ALLOC_WIRED)) == NULL) {
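[Editor's note] The new assertion encodes the invariant this series
establishes: a wired L2 mapping always has a leaf PTP cached for demotion, so
the allocation fallback below it can only be reached for unwired mappings. A
minimal userspace model of that control flow, with simplified stand-ins for
the kernel types and helpers (lookup_leaf_ptp is invented; the PTE_SW_WIRED
value shown is illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define PTE_A        0x040 /* accessed bit, as in sys/riscv/include/pte.h */
#define PTE_SW_WIRED 0x100 /* software wired bit (illustrative value) */

struct ptp { int ref_count; };

/* Stand-in for pmap_remove_pt_page(): return the cached leaf PTP, if any. */
static struct ptp *
lookup_leaf_ptp(struct ptp *cached)
{
	return (cached);
}

static void
demote(unsigned long oldl2, struct ptp *cached)
{
	struct ptp *mpte;

	if ((oldl2 & PTE_A) == 0 || (mpte = lookup_leaf_ptp(cached)) == NULL) {
		/* The new assertion: a wired mapping never takes this path. */
		assert((oldl2 & PTE_SW_WIRED) == 0);
		printf("no cached PTP; a fresh one would be allocated here\n");
	} else
		printf("reusing cached PTP, ref_count %d\n", mpte->ref_count);
}

int
main(void)
{
	struct ptp leaf = { .ref_count = 512 };

	demote(PTE_A | PTE_SW_WIRED, &leaf); /* wired: PTP is guaranteed */
	demote(PTE_A, NULL);                 /* unwired: allocating is fine */
	return (0);
}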
@@ -3217,6 +3219,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
         pd_entry_t *l2, *l3, oldl2;
         vm_offset_t sva;
         vm_page_t l2pg, mt;
+        vm_page_t uwptpg;
 
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3274,6 +3277,24 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
                     ("pmap_enter_l2: non-zero L2 entry %p", l2));
         }
 
+        /*
+         * Allocate leaf ptpage for wired userspace pages.
+         */
+        uwptpg = NULL;
+        if ((new_l2 & PTE_SW_WIRED) != 0 && pmap != kernel_pmap) {
+                uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
+                if (uwptpg == NULL) {
+                        return (KERN_RESOURCE_SHORTAGE);
+                }
+                uwptpg->pindex = pmap_l2_pindex(va);
+                if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
+                        vm_page_unwire_noq(uwptpg);
+                        vm_page_free(uwptpg);
+                        return (KERN_RESOURCE_SHORTAGE);
+                }
+                pmap_resident_count_inc(pmap, 1);
+                uwptpg->ref_count = Ln_ENTRIES;
+        }
         if ((new_l2 & PTE_SW_MANAGED) != 0) {
                 /*
                  * Abort this mapping if its PV entry could not be created.
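[Editor's note] The hunk above is the heart of the change: before any mapping
state is committed, pmap_enter_l2() now preallocates a wired leaf PTP for
wired userspace mappings, and backs out cleanly if either the allocation or
the insertion into the pmap's page-table-page cache fails. A compilable
userspace model of that error discipline follows; alloc_wired_ptp,
insert_pt_page, and prealloc_leaf_ptp are invented stand-ins for the kernel
primitives, and KERN_RESOURCE_SHORTAGE's value matches sys/vm/vm_param.h:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define KERN_SUCCESS           0
#define KERN_RESOURCE_SHORTAGE 6
#define LN_ENTRIES             512 /* PTEs per riscv page-table page */

struct ptp { int ref_count; bool wired; };

/* Stand-in for vm_page_alloc_noobj(VM_ALLOC_WIRED). */
static struct ptp *
alloc_wired_ptp(void)
{
	struct ptp *m = calloc(1, sizeof(*m));

	if (m != NULL)
		m->wired = true;
	return (m);
}

/* Stand-in for pmap_insert_pt_page(); nonzero means insertion failed. */
static int
insert_pt_page(struct ptp *m, bool fail)
{
	(void)m;
	return (fail ? 1 : 0);
}

static int
prealloc_leaf_ptp(bool wired, bool user, bool insert_fails, struct ptp **out)
{
	struct ptp *uwptpg = NULL;

	/* Only wired userspace mappings need a preallocated leaf PTP. */
	if (wired && user) {
		if ((uwptpg = alloc_wired_ptp()) == NULL)
			return (KERN_RESOURCE_SHORTAGE);
		if (insert_pt_page(uwptpg, insert_fails) != 0) {
			uwptpg->wired = false; /* vm_page_unwire_noq() */
			free(uwptpg);          /* vm_page_free() */
			return (KERN_RESOURCE_SHORTAGE);
		}
		/* Treat the page as fully referenced, as the commit does. */
		uwptpg->ref_count = LN_ENTRIES;
	}
	*out = uwptpg;
	return (KERN_SUCCESS);
}

int
main(void)
{
	struct ptp *p = NULL;

	printf("success path -> %d\n", prealloc_leaf_ptp(true, true, false, &p));
	free(p);
	printf("insert failure -> %d\n", prealloc_leaf_ptp(true, true, true, &p));
	return (0);
}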
@@ -3290,6 +3311,16 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
                                 pmap_invalidate_page(pmap, va);
                                 vm_page_free_pages_toq(&free, true);
                         }
+                        if (uwptpg != NULL) {
+                                mt = pmap_remove_pt_page(pmap, va);
+                                KASSERT(mt == uwptpg,
+                                    ("removed pt page %p, expected %p", mt,
+                                    uwptpg));
+                                pmap_resident_count_dec(pmap, 1);
+                                uwptpg->ref_count = 1;
+                                vm_page_unwire_noq(uwptpg);
+                                vm_page_free(uwptpg);
+                        }
                         CTR2(KTR_PMAP,
                             "pmap_enter_l2: failed to create PV entry"
                             " for va %#lx in pmap %p", va, pmap);