author     Justin Hibbits <jhibbits@FreeBSD.org>    2021-05-10 00:19:07 +0000
committer  Justin Hibbits <jhibbits@FreeBSD.org>    2021-05-29 20:14:33 +0000
commit     811e645d286d0b97c02d1b66a59a76c327d46c35 (patch)
tree       1146027025d9569b3246c274f539566eaa8d2805 /sys
parent     09947faee84b84f8126fd9dcf30ea8fd9a0cae92 (diff)
Apply r350463 (43ded0a321a) to powerpc64 radix pmap
Invalidate the last page of a demoted superpage mapping, instead of the first page, as it results in slightly more promotions and fewer failures.

While here, replace 'boolean_t's with 'bool's in mmu_radix_advise().
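As a standalone illustration of the new address selection (not code from the tree): the change picks the last base page inside [sva, min(va_next, eva)) instead of the first. The helper name last_page_in_range, the userland assert, and the fixed 4096-byte PAGE_SIZE below are assumptions made for this sketch only; the kernel code operates on vm_offset_t and uses KASSERT, as shown in the diff.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL        /* illustrative base page size, not taken from kernel headers */

/*
 * Sketch of the selection added by this commit: return the start of the
 * last page within [sva, min(va_next, eva)), which is the address now
 * passed to pmap_l3e_to_pte() and pmap_remove_pte().
 */
static uint64_t
last_page_in_range(uint64_t sva, uint64_t va_next, uint64_t eva)
{
        uint64_t va;

        va = eva;
        if (va > va_next)
                va = va_next;
        va -= PAGE_SIZE;
        assert(va >= sva);      /* mirrors the KASSERT in the diff */
        return (va);
}

Per the commit message, invalidating this tail page of the demoted range, rather than the head page, yields slightly more subsequent repromotions and fewer promotion failures.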
Diffstat (limited to 'sys')
-rw-r--r--  sys/powerpc/aim/mmu_radix.c  26
1 file changed, 17 insertions, 9 deletions
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index e86bfa94c7ae..83eda03f9556 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -2207,11 +2207,11 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
         pt_entry_t *pte;
         vm_offset_t va, va_next;
         vm_page_t m;
-        boolean_t anychanged;
+        bool anychanged;
 
         if (advice != MADV_DONTNEED && advice != MADV_FREE)
                 return;
-        anychanged = FALSE;
+        anychanged = false;
         PMAP_LOCK(pmap);
         for (; sva < eva; sva = va_next) {
                 l1e = pmap_pml1e(pmap, sva);
@@ -2252,17 +2252,25 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
                         /*
                          * Unless the page mappings are wired, remove the
                          * mapping to a single page so that a subsequent
-                         * access may repromote. Since the underlying page
-                         * table page is fully populated, this removal never
-                         * frees a page table page.
+                         * access may repromote. Choosing the last page
+                         * within the address range [sva, min(va_next, eva))
+                         * generally results in more repromotions. Since the
+                         * underlying page table page is fully populated, this
+                         * removal never frees a page table page.
                          */
                         if ((oldl3e & PG_W) == 0) {
-                                pte = pmap_l3e_to_pte(l3e, sva);
+                                va = eva;
+                                if (va > va_next)
+                                        va = va_next;
+                                va -= PAGE_SIZE;
+                                KASSERT(va >= sva,
+                                    ("mmu_radix_advise: no address gap"));
+                                pte = pmap_l3e_to_pte(l3e, va);
                                 KASSERT((be64toh(*pte) & PG_V) != 0,
                                     ("pmap_advise: invalid PTE"));
-                                pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), NULL,
+                                pmap_remove_pte(pmap, pte, va, be64toh(*l3e), NULL,
                                     &lock);
-                                anychanged = TRUE;
+                                anychanged = true;
                         }
                         if (lock != NULL)
                                 rw_wunlock(lock);
@@ -2291,7 +2299,7 @@ mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
                                 atomic_clear_long(pte, htobe64(PG_A));
                         else
                                 goto maybe_invlrng;
-                        anychanged = TRUE;
+                        anychanged = true;
                         continue;
 maybe_invlrng:
                         if (va != va_next) {