aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2023-11-02 18:33:55 +0000
committerMark Johnston <markj@FreeBSD.org>2023-12-02 21:54:34 +0000
commit284ed592f06ddb8a79ebd6256322c46ece24fd79 (patch)
treedfe767603474d1afd2b22b1afbdaa8e9c5026174
parent8c88d17fa5c49a835966091acc9c41d19b1f3692 (diff)
downloadsrc-284ed592f06ddb8a79ebd6256322c46ece24fd79.tar.gz
src-284ed592f06ddb8a79ebd6256322c46ece24fd79.zip
riscv: Port improvements from arm64/amd64 pmaps, part 2
- Give pmap_promote_l2() a return value indicating whether or not promotion succeeded.
- Check pmap_ps_enabled() in pmap_promote_l2() rather than making callers do it.
- Annotate superpages_enabled with __read_frequently.

Reviewed by:	kib
MFC after:	1 month
Differential Revision:	https://reviews.freebsd.org/D42289

(cherry picked from commit 3c4f46b0d57b76d304f8a94862c6ba9be0273565)
-rw-r--r--sys/riscv/riscv/pmap.c18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 716f00b36005..376027c8be8e 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -256,7 +256,7 @@ CTASSERT(VM_EARLY_DTB_ADDRESS < (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE)
static struct rwlock_padalign pvh_global_lock;
static struct mtx_padalign allpmaps_lock;
-static int superpages_enabled = 1;
+static int __read_frequently superpages_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
CTLFLAG_RDTUN, &superpages_enabled, 0,
"Enable support for transparent superpages");
@@ -2753,7 +2753,7 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
}
#if VM_NRESERVLEVEL > 0
-static void
+static bool
pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
struct rwlock **lockp)
{
@@ -2761,6 +2761,8 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
vm_paddr_t pa;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ if (!pmap_ps_enabled(pmap))
+ return (false);
KASSERT((pmap_load(l2) & PTE_RWX) == 0,
("pmap_promote_l2: invalid l2 entry %p", l2));
@@ -2777,7 +2779,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
va, pmap);
atomic_add_long(&pmap_l2_p_failures, 1);
- return;
+ return (false);
}
/*
@@ -2811,7 +2813,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
"pmap_promote_l2: failure for va %#lx pmap %p",
va, pmap);
atomic_add_long(&pmap_l2_p_failures, 1);
- return;
+ return (false);
}
while ((l3e & (PTE_W | PTE_D)) == PTE_W) {
if (atomic_fcmpset_64(l3, &l3e, l3e & ~PTE_W)) {
@@ -2824,7 +2826,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
"pmap_promote_l2: failure for va %#lx pmap %p",
va, pmap);
atomic_add_long(&pmap_l2_p_failures, 1);
- return;
+ return (false);
}
all_l3e_PTE_A &= l3e;
pa -= PAGE_SIZE;
@@ -2851,7 +2853,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx pmap %p",
va, pmap);
atomic_add_long(&pmap_l2_p_failures, 1);
- return;
+ return (false);
}
if ((firstl3e & PTE_SW_MANAGED) != 0)
@@ -2862,6 +2864,7 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va, vm_page_t ml3,
atomic_add_long(&pmap_l2_promotions, 1);
CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
pmap);
+ return (true);
}
#endif
@@ -3127,10 +3130,9 @@ validate:
#if VM_NRESERVLEVEL > 0
if (mpte != NULL && mpte->ref_count == Ln_ENTRIES &&
- pmap_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
- pmap_promote_l2(pmap, l2, va, mpte, &lock);
+ (void)pmap_promote_l2(pmap, l2, va, mpte, &lock);
#endif
rv = KERN_SUCCESS;