author     Mark Johnston <markj@FreeBSD.org>  2023-11-02 18:34:26 +0000
committer  Mark Johnston <markj@FreeBSD.org>  2023-11-02 18:34:26 +0000
commit     71b77a7172c26783a9d2181d3bed27cf62974200 (patch)
tree       b2c3306db1dfdf3add40704a2cc2996f67f468b0
parent     0b8372b707a50c2deeaf66d4c55fec51007cec44 (diff)
riscv: Remove unnecessary invalidations in pmap_enter_quick_locked()
This function always overwrites an invalid PTE, so if
pmap_try_insert_pv_entry() fails it is certainly not necessary to
invalidate anything, because the PTE has not yet been written by that
point.

It should also not be necessary to invalidate TLBs after overwriting an
invalid entry. In principle the TLB could cache negative entries, but
then the worst-case scenario is a spurious fault. Since pmap_enter()
does not bother issuing an sfence.vma, pmap_enter_quick_locked() should
behave similarly.

Reviewed by:	kib
MFC after:	1 month
Differential Revision:	https://reviews.freebsd.org/D42291
-rw-r--r--	sys/riscv/riscv/pmap.c	14
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index ebb0f069b4ab..f0108b611937 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3458,11 +3458,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (l3 == NULL)
panic("pmap_enter_quick_locked: No l3");
if (pmap_load(l3) != 0) {
- if (mpte != NULL) {
+ if (mpte != NULL)
mpte->ref_count--;
- mpte = NULL;
- }
- return (mpte);
+ return (NULL);
}
/*
@@ -3472,13 +3470,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
SLIST_INIT(&free);
- if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
- pmap_invalidate_page(pmap, va);
+ if (pmap_unwire_ptp(pmap, va, mpte, &free))
vm_page_free_pages_toq(&free, false);
- }
- mpte = NULL;
}
- return (mpte);
+ return (NULL);
}
/*
@@ -3524,7 +3519,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
}
#endif
- pmap_invalidate_page(pmap, va);
return (mpte);
}
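
For readability, the excerpts below reconstruct how the affected early-return
paths of pmap_enter_quick_locked() read after this change. They are assembled
only from the hunks above; anything marked with "..." is not shown in this
diff, and the comments are editorial annotations, not part of the source.

	/* Hunk 1: a missing L3 page-table page is fatal; an already-valid
	 * PTE just drops the page-table-page reference and bails out.
	 * No TLB invalidation is needed because nothing was written. */
	if (l3 == NULL)
		panic("pmap_enter_quick_locked: No l3");
	if (pmap_load(l3) != 0) {
		if (mpte != NULL)
			mpte->ref_count--;
		return (NULL);
	}
	...
	/* Hunk 2: if no PV entry can be allocated, undo the page-table-page
	 * wiring and return; again the PTE was never written, so the
	 * pmap_invalidate_page() call that used to sit here is gone. */
	if (... /* first half of the condition is not shown in this diff */ &&
	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
		if (mpte != NULL) {
			SLIST_INIT(&free);
			if (pmap_unwire_ptp(pmap, va, mpte, &free))
				vm_page_free_pages_toq(&free, false);
		}
		return (NULL);
	}
	...
	/* Hunk 3: the success path now returns without the trailing
	 * pmap_invalidate_page(); at worst a stale negative TLB entry
	 * causes one spurious fault, matching pmap_enter()'s behavior. */
	return (mpte);
}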