author     Mark Johnston <markj@FreeBSD.org>  2021-10-20 00:23:39 +0000
committer  Mark Johnston <markj@FreeBSD.org>  2021-10-20 01:22:56 +0000
commit     a4667e09e6520dc2c4b0b988051f060fed695a91 (patch)
tree       ed9b1a6922f2fa0a0c125f2d06242a88a2eb5568 /sys/riscv/riscv
parent     b498f71bc56af0069d9a4685b8385ee613a00727 (diff)
Convert vm_page_alloc() callers to use vm_page_alloc_noobj().
Remove page zeroing code from consumers and stop specifying
VM_ALLOC_NOOBJ.  In a few places, also convert an allocation loop to
simply use VM_ALLOC_WAITOK.

Similarly, convert vm_page_alloc_domain() callers.

Note that callers are now responsible for assigning the pindex.

Reviewed by:	alc, hselasky, kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31986
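As background for the hunks below, a minimal before/after sketch of the
conversion idiom (illustrative only, not verbatim from the tree; "m" and
"ptepindex" stand for a caller's page-table page and its pindex):

	/*
	 * Before: object-less allocations went through vm_page_alloc()
	 * with a NULL object.  The pindex argument was stored in
	 * m->pindex, and the caller zeroed the page itself whenever
	 * PG_ZERO was not already set.
	 */
	m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m != NULL && (m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/*
	 * After: vm_page_alloc_noobj() drops the object and pindex
	 * arguments, and VM_ALLOC_ZERO guarantees a zeroed page, so the
	 * PG_ZERO check disappears.  The caller assigns the pindex
	 * itself, and VM_ALLOC_WAITOK can replace an open-coded
	 * vm_wait() retry loop where sleeping is permitted.
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m != NULL)
		m->pindex = ptepindex;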
Diffstat (limited to 'sys/riscv/riscv')
-rw-r--r--  sys/riscv/riscv/pmap.c         52
-rw-r--r--  sys/riscv/riscv/uma_machdep.c   6
2 files changed, 20 insertions, 38 deletions
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 292b1c2f6b3f..e1ff056117eb 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -1220,17 +1220,13 @@ pmap_pinit(pmap_t pmap)
/*
* allocate the l1 page
*/
- while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
- vm_wait(NULL);
+ l1pt = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO |
+ VM_ALLOC_WAITOK);
l1phys = VM_PAGE_TO_PHYS(l1pt);
pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
- if ((l1pt->flags & PG_ZERO) == 0)
- pagezero(pmap->pm_l1);
-
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
CPU_ZERO(&pmap->pm_active);
@@ -1272,8 +1268,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
/*
* Allocate a page table page.
*/
- if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+ m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (m == NULL) {
if (lockp != NULL) {
RELEASE_PV_LIST_LOCK(lockp);
PMAP_UNLOCK(pmap);
@@ -1289,9 +1285,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
*/
return (NULL);
}
-
- if ((m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
+ m->pindex = ptepindex;
/*
* Map the pagetable page into the process address space, if
@@ -1485,13 +1479,11 @@ pmap_growkernel(vm_offset_t addr)
l1 = pmap_l1(kernel_pmap, kernel_vm_end);
if (pmap_load(l1) == 0) {
/* We need a new PDP entry */
- nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
- VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+ nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT |
VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
- if ((nkpg->flags & PG_ZERO) == 0)
- pmap_zero_page(nkpg);
+ nkpg->pindex = kernel_vm_end >> L1_SHIFT;
paddr = VM_PAGE_TO_PHYS(nkpg);
pn = (paddr / PAGE_SIZE);
@@ -1513,14 +1505,11 @@ pmap_growkernel(vm_offset_t addr)
continue;
}
- nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
- VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ nkpg = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
- if ((nkpg->flags & PG_ZERO) == 0) {
- pmap_zero_page(nkpg);
- }
+ nkpg->pindex = kernel_vm_end >> L2_SHIFT;
paddr = VM_PAGE_TO_PHYS(nkpg);
pn = (paddr / PAGE_SIZE);
@@ -1700,8 +1689,7 @@ retry:
}
}
/* No free items, allocate another chunk */
- m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED);
+ m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
if (lockp == NULL) {
PV_STAT(pc_chunk_tryfail++);
@@ -1767,8 +1755,7 @@ retry:
break;
}
for (reclaimed = false; avail < needed; avail += _NPCPV) {
- m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED);
+ m = vm_page_alloc_noobj(VM_ALLOC_WIRED);
if (m == NULL) {
m = reclaim_pv_chunk(pmap, lockp);
if (m == NULL)
@@ -2487,10 +2474,9 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
("pmap_demote_l2_locked: oldl2 is not a leaf entry"));
if ((oldl2 & PTE_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) ==
NULL) {
- if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc(NULL,
- pmap_l2_pindex(va), (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT :
- VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) ==
- NULL) {
+ if ((oldl2 & PTE_A) == 0 || (mpte = vm_page_alloc_noobj(
+ (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : 0) |
+ VM_ALLOC_WIRED)) == NULL) {
SLIST_INIT(&free);
(void)pmap_remove_l2(pmap, l2, va & ~L2_OFFSET,
pmap_load(pmap_l1(pmap, va)), &free, lockp);
@@ -2499,6 +2485,7 @@ pmap_demote_l2_locked(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
"failure for va %#lx in pmap %p", va, pmap);
return (false);
}
+ mpte->pindex = pmap_l2_pindex(va);
if (va < VM_MAXUSER_ADDRESS) {
mpte->ref_count = Ln_ENTRIES;
pmap_resident_count_inc(pmap, 1);
@@ -2750,13 +2737,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/* TODO: This is not optimal, but should mostly work */
if (l3 == NULL) {
if (l2 == NULL) {
- l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ l2_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
VM_ALLOC_ZERO);
if (l2_m == NULL)
panic("pmap_enter: l2 pte_m == NULL");
- if ((l2_m->flags & PG_ZERO) == 0)
- pmap_zero_page(l2_m);
l2_pa = VM_PAGE_TO_PHYS(l2_m);
l2_pn = (l2_pa / PAGE_SIZE);
@@ -2769,8 +2753,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l2 = pmap_l1_to_l2(l1, va);
}
- l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
- VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ l3_m = vm_page_alloc_noobj(VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
if (l3_m == NULL)
panic("pmap_enter: l3 pte_m == NULL");
if ((l3_m->flags & PG_ZERO) == 0)
diff --git a/sys/riscv/riscv/uma_machdep.c b/sys/riscv/riscv/uma_machdep.c
index f1725fde4699..0c8abacd0302 100644
--- a/sys/riscv/riscv/uma_machdep.c
+++ b/sys/riscv/riscv/uma_machdep.c
@@ -46,16 +46,14 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
void *va;
*flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_domain(NULL, 0, domain,
- malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+ m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
+ VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
- if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
- bzero(va, PAGE_SIZE);
return (va);
}
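For reference, the uma_machdep.c hunk leaves uma_small_alloc() looking
roughly like the fragment below (assembled from the diff above for
readability, not copied from the tree; the surrounding file supplies the
usual includes).  malloc2vm_flags() translates M_ZERO and the
M_NOWAIT/M_WAITOK sleep flags into their VM_ALLOC_* counterparts, and
vm_page_alloc_noobj_domain() hands back an already-zeroed page whenever
VM_ALLOC_ZERO is set, which is why the manual bzero() of the mapping
could be deleted:

	void *
	uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
	    u_int8_t *flags, int wait)
	{
		vm_page_t m;
		vm_paddr_t pa;

		*flags = UMA_SLAB_PRIV;
		/* NUMA-aware, object-less allocation; may sleep only if
		   the caller passed M_WAITOK. */
		m = vm_page_alloc_noobj_domain(domain,
		    malloc2vm_flags(wait) | VM_ALLOC_WIRED);
		if (m == NULL)
			return (NULL);
		pa = m->phys_addr;
		if ((wait & M_NODUMP) == 0)
			dump_add_page(pa);
		/* The page is already zeroed if M_ZERO was requested. */
		return ((void *)PHYS_TO_DMAP(pa));
	}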