author    Konstantin Belousov <kib@FreeBSD.org>    2021-01-09 19:40:45 +0000
committer Konstantin Belousov <kib@FreeBSD.org>    2021-01-11 20:57:58 +0000
commit    9a8f5f5cf5ba834bbaa0b80178895305afd031d2 (patch)
tree      3f59437ad1b262ed983debc3c9afed9ca8ea23e9 /sys/amd64/amd64/pmap.c
parent    f67064e592ce0017c72ed785db64d71a459db996 (diff)
amd64 pmap: rename _pmap_allocpte() to pmap_allocpte_alloc().
The function performs the actual allocation of a pte page, as opposed to pmap_allocpte(), which reuses an existing pte if the pt page is already there. The rename also moves the function out of the underscore-prefixed namespace, which is similar to the identifier namespace reserved by the C language.

Reviewed by:	markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D27956
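To make the naming rationale concrete, below is a minimal, self-contained sketch of the wrapper/worker split the message describes. Every name in it (pt_get, pt_alloc, pt_pages) is a hypothetical stand-in rather than a real pmap(9) interface: pt_get() plays the pmap_allocpte() role of reusing an existing page table page, and pt_alloc() plays the pmap_allocpte_alloc() role of always allocating one.

/*
 * Minimal sketch of the wrapper/worker pattern; hypothetical names,
 * not the actual pmap code.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_NPAGES	8

static void *pt_pages[TOY_NPAGES];	/* toy page table page cache */

/* Worker: unconditionally allocates, like pmap_allocpte_alloc(). */
static void *
pt_alloc(int idx)
{

	pt_pages[idx] = calloc(1, 4096);
	return (pt_pages[idx]);
}

/* Wrapper: reuses an existing page if present, like pmap_allocpte(). */
static void *
pt_get(int idx)
{

	if (pt_pages[idx] != NULL)
		return (pt_pages[idx]);	/* fast path: page already there */
	return (pt_alloc(idx));		/* slow path: actually allocate */
}

int
main(void)
{
	void *a = pt_get(3);	/* allocates */
	void *b = pt_get(3);	/* reuses the same page */

	printf("same page: %s\n", a == b ? "yes" : "no");
	free(a);
	return (0);
}

The rename gives the worker a descriptive _alloc suffix instead of a leading underscore; file-scope identifiers beginning with an underscore are reserved by the C standard, which is the "namespace similar to a language reserved" the message refers to.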
Diffstat (limited to 'sys/amd64/amd64/pmap.c')
-rw-r--r--  sys/amd64/amd64/pmap.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 913a37c763a8..20aed31a2098 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1275,7 +1275,7 @@ static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
pd_entry_t newpde);
static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);
-static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
+static vm_page_t pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex,
struct rwlock **lockp, vm_offset_t va);
static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
struct rwlock **lockp);
@@ -4294,8 +4294,8 @@ pmap_allocpte_getpml4(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
pml5index = pmap_pml5e_index(va);
pml5 = &pmap->pm_pmltop[pml5index];
if ((*pml5 & PG_V) == 0) {
- if (_pmap_allocpte(pmap, pmap_pml5e_pindex(va), lockp, va) ==
- NULL)
+ if (pmap_allocpte_alloc(pmap, pmap_pml5e_pindex(va), lockp,
+ va) == NULL)
return (NULL);
allocated = true;
} else {
@@ -4331,8 +4331,8 @@ pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
if ((*pml4 & PG_V) == 0) {
/* Have to allocate a new pdp, recurse */
- if (_pmap_allocpte(pmap, pmap_pml4e_pindex(va), lockp, va) ==
- NULL) {
+ if (pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va), lockp,
+ va) == NULL) {
if (pmap_is_la57(pmap))
pmap_allocpte_free_unref(pmap, va,
pmap_pml5e(pmap, va));
@@ -4395,10 +4395,10 @@ pmap_allocpte_getpdp(pmap_t pmap, struct rwlock **lockp, vm_offset_t va,
* regardless of the actual mode of operation.
*
* The root page at PML4/PML5 does not participate in this indexing scheme,
- * since it is statically allocated by pmap_pinit() and not by _pmap_allocpte().
+ * since it is statically allocated by pmap_pinit() and not by pmap_allocpte().
*/
static vm_page_t
-_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
+pmap_allocpte_alloc(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
vm_offset_t va)
{
vm_pindex_t pml5index, pml4index;
@@ -4509,7 +4509,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp,
}
if ((*pdp & PG_V) == 0) {
/* Have to allocate a new pd, recurse */
- if (_pmap_allocpte(pmap, pmap_pdpe_pindex(va),
+ if (pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
lockp, va) == NULL) {
pmap_allocpte_free_unref(pmap, va,
pmap_pml4e(pmap, va));
@@ -4560,7 +4560,7 @@ retry:
} else if (va < VM_MAXUSER_ADDRESS) {
/* Allocate a pd page. */
pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
- pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp, va);
+ pdpg = pmap_allocpte_alloc(pmap, NUPDE + pdpindex, lockp, va);
if (pdpg == NULL) {
if (lockp != NULL)
goto retry;
@@ -4621,7 +4621,7 @@ retry:
* Here if the pte page isn't mapped, or if it has been
* deallocated.
*/
- m = _pmap_allocpte(pmap, ptepindex, lockp, va);
+ m = pmap_allocpte_alloc(pmap, ptepindex, lockp, va);
if (m == NULL && lockp != NULL)
goto retry;
}
@@ -6640,7 +6640,7 @@ restart:
if (psind == 2) { /* 1G */
pml4e = pmap_pml4e(pmap, va);
if (pml4e == NULL || (*pml4e & PG_V) == 0) {
- mp = _pmap_allocpte(pmap, pmap_pml4e_pindex(va),
+ mp = pmap_allocpte_alloc(pmap, pmap_pml4e_pindex(va),
NULL, va);
if (mp == NULL)
goto allocf;
@@ -6661,7 +6661,7 @@ restart:
} else /* (psind == 1) */ { /* 2M */
pde = pmap_pde(pmap, va);
if (pde == NULL) {
- mp = _pmap_allocpte(pmap, pmap_pdpe_pindex(va),
+ mp = pmap_allocpte_alloc(pmap, pmap_pdpe_pindex(va),
NULL, va);
if (mp == NULL)
goto allocf;
@@ -6816,7 +6816,7 @@ retry:
* deallocated.
*/
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
- mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
+ mpte = pmap_allocpte_alloc(pmap, pmap_pde_pindex(va),
nosleep ? NULL : &lock, va);
if (mpte == NULL && nosleep) {
rv = KERN_RESOURCE_SHORTAGE;
@@ -7300,8 +7300,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* Pass NULL instead of the PV list lock
* pointer, because we don't intend to sleep.
*/
- mpte = _pmap_allocpte(pmap, ptepindex, NULL,
- va);
+ mpte = pmap_allocpte_alloc(pmap, ptepindex,
+ NULL, va);
if (mpte == NULL)
return (mpte);
}
@@ -7629,7 +7629,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
srcptepaddr = *pdpe;
pdpe = pmap_pdpe(dst_pmap, addr);
if (pdpe == NULL) {
- if (_pmap_allocpte(dst_pmap,
+ if (pmap_allocpte_alloc(dst_pmap,
pmap_pml4e_pindex(addr), NULL, addr) ==
NULL)
break;
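A side note on the pindex arithmetic visible above (pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT, then NUPDE + pdpindex): page table pages of all levels live in one flat vm_pindex_t space, with each level's pages placed after all of the previous level's, which is the indexing scheme the comment at the pmap_allocpte_alloc() definition refers to. The toy sketch below illustrates that layout with made-up sizes; the real NUPDE/NUPDPE constants are far larger, and the real macros shift without masking.

/*
 * Toy illustration of a flat pindex space for multi-level page table
 * pages; the sizes and masking are invented for the example.
 */
#include <stdio.h>

#define TOY_NUPDE	512	/* hypothetical count of level-1 (pte) pages */
#define TOY_NUPDPE	64	/* hypothetical count of level-2 (pd) pages */

/* Level-1 pages occupy pindexes [0, TOY_NUPDE). */
static unsigned long
toy_pde_pindex(unsigned long va)
{

	return ((va >> 21) & (TOY_NUPDE - 1));
}

/* Level-2 pages follow them: [TOY_NUPDE, TOY_NUPDE + TOY_NUPDPE). */
static unsigned long
toy_pdpe_pindex(unsigned long va)
{

	return (TOY_NUPDE + ((va >> 30) & (TOY_NUPDPE - 1)));
}

int
main(void)
{
	unsigned long va = 0x40203000UL;

	printf("pte page pindex: %lu\n", toy_pde_pindex(va));
	printf("pd page pindex:  %lu\n", toy_pdpe_pindex(va));
	return (0);
}

With this layout a single pindex identifies both the level and the slot of a page table page, which is what lets one worker function serve every level of the hierarchy.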