| field | value | date |
|---|---|---|
| author | Mark Johnston <markj@FreeBSD.org> | 2021-10-20 00:23:39 +0000 |
| committer | Mark Johnston <markj@FreeBSD.org> | 2021-11-03 17:39:36 +0000 |
| commit | 66cb1858f44992af73b6e1f9eeceb500bffd2534 | |
| tree | 751ab5fdb3715e206b80ad574935d875117d6dc7 /sys/powerpc | |
| parent | 24204bede3ad8126348c568420a25d47e5bdfe42 | |
Convert vm_page_alloc() callers to use vm_page_alloc_noobj().
Remove page zeroing code from consumers and stop specifying
VM_ALLOC_NOOBJ. In a few places, also convert an allocation loop to
simply use VM_ALLOC_WAITOK.
Similarly, convert vm_page_alloc_domain() callers.
Note that callers are now responsible for assigning the pindex.
Reviewed by: alc, hselasky, kib
Sponsored by: The FreeBSD Foundation
(cherry picked from commit a4667e09e6520dc2c4b0b988051f060fed695a91)
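
For reference, a minimal sketch of the conversion pattern this commit applies throughout. This is not code from the tree; `alloc_ptpage_old()`/`alloc_ptpage_new()` are hypothetical callers written to show the before/after shape of the hunks below:

```c
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Before: pass a NULL object plus VM_ALLOC_NOOBJ, and check PG_ZERO by
 * hand, since vm_page_alloc() could hand back a dirty page even when
 * VM_ALLOC_ZERO was requested.
 */
static vm_page_t
alloc_ptpage_old(vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, pindex, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m != NULL && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	return (m);
}

/*
 * After: vm_page_alloc_noobj() honors VM_ALLOC_ZERO itself, so the
 * manual zeroing disappears, but the pindex is no longer set for the
 * caller and must be assigned explicitly where other code relies on it.
 */
static vm_page_t
alloc_ptpage_new(vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m != NULL)
		m->pindex = pindex;
	return (m);
}
```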
Diffstat (limited to 'sys/powerpc')
| mode | path | changed lines |
|---|---|---|
| -rw-r--r-- | sys/powerpc/aim/mmu_oea64.c | 7 |
| -rw-r--r-- | sys/powerpc/aim/mmu_radix.c | 36 |
| -rw-r--r-- | sys/powerpc/booke/pmap_32.c | 4 |
| -rw-r--r-- | sys/powerpc/booke/pmap_64.c | 9 |
| -rw-r--r-- | sys/powerpc/powerpc/uma_machdep.c | 7 |

5 files changed, 23 insertions(+), 40 deletions(-)
```diff
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 3faa3d0752b8..2f7f6ce29422 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1922,8 +1922,8 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
 	*flags = UMA_SLAB_PRIV;
 	needed_lock = !PMAP_LOCKED(kernel_pmap);
 
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
@@ -1945,9 +1945,6 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
 	if (needed_lock)
 		PMAP_UNLOCK(kernel_pmap);
 
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero((void *)va, PAGE_SIZE);
-
 	return (void *)va;
 }
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index b9ca10448f9f..6dba22f2cd0f 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -1206,8 +1206,7 @@ retry:
 			break;
 	}
 	for (reclaimed = false; avail < needed; avail += _NPCPV) {
-		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-		    VM_ALLOC_WIRED);
+		m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 		if (m == NULL) {
 			m = reclaim_pv_chunk(pmap, lockp);
 			if (m == NULL)
@@ -1629,8 +1628,7 @@ retry:
 		}
 	}
 	/* No free items, allocate another chunk */
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED);
+	m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
 	if (m == NULL) {
 		if (lockp == NULL) {
 			PV_STAT(pc_chunk_tryfail++);
@@ -3503,13 +3501,11 @@ mmu_radix_growkernel(vm_offset_t addr)
 		l2e = pmap_pml2e(kernel_pmap, kernel_vm_end);
 		if ((be64toh(*l2e) & PG_V) == 0) {
 			/* We need a new PDP entry */
-			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT,
-			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+			nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
 			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 			if (nkpg == NULL)
 				panic("pmap_growkernel: no memory to grow kernel");
-			if ((nkpg->flags & PG_ZERO) == 0)
-				mmu_radix_zero_page(nkpg);
+			nkpg->pindex = kernel_vm_end >> L2_PAGE_SIZE_SHIFT;
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pde_store(l2e, paddr);
 			continue; /* try again */
@@ -3524,13 +3520,11 @@ mmu_radix_growkernel(vm_offset_t addr)
 			continue;
 		}
 
-		nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end),
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+		nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
 		if (nkpg == NULL)
 			panic("pmap_growkernel: no memory to grow kernel");
-		if ((nkpg->flags & PG_ZERO) == 0)
-			mmu_radix_zero_page(nkpg);
+		nkpg->pindex = pmap_l3e_pindex(kernel_vm_end);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
 		pde_store(l3e, paddr);
@@ -4217,8 +4211,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 	/*
 	 * Allocate a page table page.
 	 */
-	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
-	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 		if (lockp != NULL) {
 			RELEASE_PV_LIST_LOCK(lockp);
 			PMAP_UNLOCK(pmap);
@@ -4231,8 +4224,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 		 */
 		return (NULL);
 	}
-	if ((m->flags & PG_ZERO) == 0)
-		mmu_radix_zero_page(m);
+	m->pindex = ptepindex;
 
 	/*
 	 * Map the pagetable page into the process address space, if
@@ -4889,10 +4881,9 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
 		 * is the only part of the kernel address space that must be
 		 * handled here.
 		 */
-		if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
-		    pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
-		    DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+		if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc_noobj(
+		    (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS ?
+		    VM_ALLOC_INTERRUPT : 0) | VM_ALLOC_WIRED)) == NULL) {
 			SLIST_INIT(&free);
 			sva = trunc_2mpage(va);
 			pmap_remove_l3e(pmap, l3e, sva, &free, lockp);
@@ -4902,6 +4893,7 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
 			    " in pmap %p", va, pmap);
 			return (FALSE);
 		}
+		mpte->pindex = pmap_l3e_pindex(va);
 		if (va < VM_MAXUSER_ADDRESS)
 			pmap_resident_count_inc(pmap, 1);
 	}
@@ -5921,13 +5913,13 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
 	oldpdpe = be64toh(*l2e);
 	KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V),
 	    ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
-	pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT,
-	    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+	pdpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
 	if (pdpg == NULL) {
 		CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
+	pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
 	pdpgpa = VM_PAGE_TO_PHYS(pdpg);
 	firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa);
 	KASSERT((oldpdpe & PG_A) != 0,
diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c
index a9f8af0565f0..fa9e0a6422d8 100644
--- a/sys/powerpc/booke/pmap_32.c
+++ b/sys/powerpc/booke/pmap_32.c
@@ -264,8 +264,7 @@ ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
 	for (i = 0; i < PTBL_PAGES; i++) {
 		pidx = (PTBL_PAGES * pdir_idx) + i;
-		while ((m = vm_page_alloc(NULL, pidx,
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+		while ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED)) == NULL) {
 			if (nosleep) {
 				ptbl_free_pmap_ptbl(pmap, ptbl);
 				for (j = 0; j < i; j++)
@@ -279,6 +278,7 @@ ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
 			rw_wlock(&pvh_global_lock);
 			PMAP_LOCK(pmap);
 		}
+		m->pindex = pidx;
 		mtbl[i] = m;
 	}
diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c
index 55db7a325be5..05db755740a1 100644
--- a/sys/powerpc/booke/pmap_64.c
+++ b/sys/powerpc/booke/pmap_64.c
@@ -157,8 +157,8 @@ mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
 	vm_page_t m;
 	int req;
 
-	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
-	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
+	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
+	while ((m = vm_page_alloc_noobj(req)) == NULL) {
 		if (nosleep)
 			return (0);
@@ -168,10 +168,7 @@ mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
 		rw_wlock(&pvh_global_lock);
 		PMAP_LOCK(pmap);
 	}
-
-	if (!(m->flags & PG_ZERO))
-		/* Zero whole ptbl. */
-		mmu_booke_zero_page(m);
+	m->pindex = idx;
 
 	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 }
diff --git a/sys/powerpc/powerpc/uma_machdep.c b/sys/powerpc/powerpc/uma_machdep.c
index a1769f61d671..ac3d98e18d0b 100644
--- a/sys/powerpc/powerpc/uma_machdep.c
+++ b/sys/powerpc/powerpc/uma_machdep.c
@@ -55,8 +55,8 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
 
 	*flags = UMA_SLAB_PRIV;
 
-	m = vm_page_alloc_domain(NULL, 0, domain,
-	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+	m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+	    VM_ALLOC_WIRED);
 	if (m == NULL)
 		return (NULL);
@@ -72,9 +72,6 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
 	} else {
 		va = (void *)(vm_offset_t)PHYS_TO_DMAP(pa);
 	}
-
-	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
-		bzero(va, PAGE_SIZE);
 	atomic_add_int(&hw_uma_mdpages, 1);
 	return (va);
```
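
The commit message also mentions converting some allocation loops to simply use VM_ALLOC_WAITOK. None of the powerpc hunks above do that, presumably because these callers must drop pmap locks before sleeping, so their retry loops stay. As a hedged sketch of what that conversion looks like elsewhere (`alloc_chunk_old()`/`alloc_chunk_new()` are hypothetical, not from this diff):

```c
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 * Before: retry by hand, sleeping in vm_wait() until the page daemon
 * frees memory. Only safe when no locks are held across the sleep.
 */
static vm_page_t
alloc_chunk_old(void)
{
	vm_page_t m;

	while ((m = vm_page_alloc(NULL, 0,
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL)
		vm_wait(NULL);
	return (m);
}

/*
 * After: VM_ALLOC_WAITOK lets vm_page_alloc_noobj() sleep internally
 * until a page is available, so the loop disappears and the call does
 * not return NULL.
 */
static vm_page_t
alloc_chunk_new(void)
{
	return (vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED));
}
```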