author	Justin Hibbits <jhibbits@FreeBSD.org>	2020-05-27 01:24:12 +0000
committer	Justin Hibbits <jhibbits@FreeBSD.org>	2020-05-27 01:24:12 +0000
commit	45b69dd63e84acebe987234cdba7a7ab0ecb07a7 (patch)
tree	eaefead5cb00faaf54421957c23c7ac3c6a86801
parent	64cc3b0c28b9dcd292a92716748ff78260c45f68 (diff)
powerpc/mmu: Convert PowerPC pmap drivers to ifunc from kobj
With IFUNC support in the kernel, we can finally get rid of our poor-man's ifunc for pmap, which was built on kobj. Since moea64 also uses a second-tier kobj for its own private methods, this adds a second pmap install function (pmap_mmu_init()) to perform pmap 'post-install, pre-bootstrap' initialization before the IFUNCs get initialized.

Reviewed by:	bdragon

Notes:
	svn path=/head/; revision=361544
-rw-r--r--	sys/conf/files.powerpc	2
-rw-r--r--	sys/powerpc/aim/mmu_oea.c	340
-rw-r--r--	sys/powerpc/aim/mmu_oea64.c	600
-rw-r--r--	sys/powerpc/aim/mmu_oea64.h	32
-rw-r--r--	sys/powerpc/aim/mmu_radix.c	402
-rw-r--r--	sys/powerpc/aim/moea64_if.m	122
-rw-r--r--	sys/powerpc/aim/moea64_native.c	87
-rw-r--r--	sys/powerpc/booke/booke_machdep.c	1
-rw-r--r--	sys/powerpc/booke/pmap.c	394
-rw-r--r--	sys/powerpc/booke/pmap_32.c	114
-rw-r--r--	sys/powerpc/booke/pmap_64.c	94
-rw-r--r--	sys/powerpc/include/mmuvar.h	214
-rw-r--r--	sys/powerpc/include/pmap.h	1
-rw-r--r--	sys/powerpc/powerpc/machdep.c	3
-rw-r--r--	sys/powerpc/powerpc/mmu_if.m	1110
-rw-r--r--	sys/powerpc/powerpc/pmap_dispatch.c	665
-rw-r--r--	sys/powerpc/ps3/mmu_ps3.c	62
-rw-r--r--	sys/powerpc/pseries/mmu_phyp.c	76
18 files changed, 1398 insertions, 2921 deletions
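
As context for the diff below, here is a minimal, hypothetical sketch of the ifunc dispatch pattern this commit moves to (illustrative names only; pmap_mmu_install_sketch and pmap_copy_page_sketch are not the real symbols, and the actual glue lives in sys/powerpc/powerpc/pmap_dispatch.c). The idea is that the chosen MMU driver registers its struct pmap_funcs early in boot, and each pmap entry point becomes an ifunc whose resolver returns the installed driver's function pointer, replacing the per-call kobj method lookup.

/*
 * Minimal sketch, assuming a pmap_installed pointer that is set before
 * the kernel resolves its ifuncs.  DEFINE_IFUNC comes from
 * <machine/ifunc.h>, which this diff adds to mmu_oea64.c; struct
 * pmap_funcs is from <machine/mmuvar.h>.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <machine/ifunc.h>
#include <machine/mmuvar.h>

static struct pmap_funcs *pmap_installed;

/* Called by the selected driver before the ifunc resolvers run. */
static void
pmap_mmu_install_sketch(struct pmap_funcs *funcs)
{
	pmap_installed = funcs;
}

/* The resolver runs once; later calls go straight to the driver routine. */
DEFINE_IFUNC(, void, pmap_copy_page_sketch, (vm_page_t src, vm_page_t dst))
{
	return (pmap_installed->copy_page);
}

Because the driver's method table must already be installed when the resolvers run, the commit adds the pmap_mmu_init() 'post-install, pre-bootstrap' hook described in the commit message.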
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index ecbca60ded1e..2b13810edd7b 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -135,7 +135,6 @@ powerpc/aim/aim_machdep.c optional aim
powerpc/aim/mmu_oea.c optional aim powerpc
powerpc/aim/mmu_oea64.c optional aim
powerpc/aim/mmu_radix.c optional aim powerpc64
-powerpc/aim/moea64_if.m optional aim
powerpc/aim/moea64_native.c optional aim
powerpc/aim/mp_cpudep.c optional aim
powerpc/aim/slb.c optional aim powerpc64
@@ -260,7 +259,6 @@ powerpc/powerpc/iommu_if.m standard
powerpc/powerpc/machdep.c standard
powerpc/powerpc/mem.c optional mem
powerpc/powerpc/minidump_machdep.c optional powerpc64
-powerpc/powerpc/mmu_if.m standard
powerpc/powerpc/mp_machdep.c optional smp
powerpc/powerpc/nexus.c standard
powerpc/powerpc/openpic.c standard
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 43fe62d1511e..c5b0b048a418 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -149,8 +149,6 @@ __FBSDID("$FreeBSD$");
#include <machine/mmuvar.h>
#include <machine/trap.h>
-#include "mmu_if.h"
-
#define MOEA_DEBUG
#define TODO panic("%s: not implemented", __func__);
@@ -267,125 +265,123 @@ static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
static void moea_syncicache(vm_paddr_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static u_int moea_clear_bit(vm_page_t, int);
-static void moea_kremove(mmu_t, vm_offset_t);
+static void moea_kremove(vm_offset_t);
int moea_pte_spill(vm_offset_t);
/*
* Kernel MMU interface
*/
-void moea_clear_modify(mmu_t, vm_page_t);
-void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
-void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+void moea_clear_modify(vm_page_t);
+void moea_copy_page(vm_page_t, vm_page_t);
+void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
+int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
int8_t);
-void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
-void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void moea_init(mmu_t);
-boolean_t moea_is_modified(mmu_t, vm_page_t);
-boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t moea_is_referenced(mmu_t, vm_page_t);
-int moea_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea_page_init(mmu_t, vm_page_t);
-int moea_page_wired_mappings(mmu_t, vm_page_t);
-void moea_pinit(mmu_t, pmap_t);
-void moea_pinit0(mmu_t, pmap_t);
-void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void moea_qremove(mmu_t, vm_offset_t, int);
-void moea_release(mmu_t, pmap_t);
-void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_remove_all(mmu_t, vm_page_t);
-void moea_remove_write(mmu_t, vm_page_t);
-void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea_zero_page(mmu_t, vm_page_t);
-void moea_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea_activate(mmu_t, struct thread *);
-void moea_deactivate(mmu_t, struct thread *);
-void moea_cpu_bootstrap(mmu_t, int);
-void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_paddr_t moea_kextract(mmu_t, vm_offset_t);
-void moea_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
-void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
-boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
-void moea_scan_init(mmu_t mmu);
-vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
-void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
-boolean_t moea_page_is_mapped(mmu_t mmu, vm_page_t m);
-static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
+void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea_extract(pmap_t, vm_offset_t);
+vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void moea_init(void);
+boolean_t moea_is_modified(vm_page_t);
+boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t moea_is_referenced(vm_page_t);
+int moea_ts_referenced(vm_page_t);
+vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
+void moea_page_init(vm_page_t);
+int moea_page_wired_mappings(vm_page_t);
+int moea_pinit(pmap_t);
+void moea_pinit0(pmap_t);
+void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea_qenter(vm_offset_t, vm_page_t *, int);
+void moea_qremove(vm_offset_t, int);
+void moea_release(pmap_t);
+void moea_remove(pmap_t, vm_offset_t, vm_offset_t);
+void moea_remove_all(vm_page_t);
+void moea_remove_write(vm_page_t);
+void moea_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void moea_zero_page(vm_page_t);
+void moea_zero_page_area(vm_page_t, int, int);
+void moea_activate(struct thread *);
+void moea_deactivate(struct thread *);
+void moea_cpu_bootstrap(int);
+void moea_bootstrap(vm_offset_t, vm_offset_t);
+void *moea_mapdev(vm_paddr_t, vm_size_t);
+void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void moea_unmapdev(vm_offset_t, vm_size_t);
+vm_paddr_t moea_kextract(vm_offset_t);
+void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
+void moea_kenter(vm_offset_t, vm_paddr_t);
+void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
+void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
+void moea_scan_init(void);
+vm_offset_t moea_quick_enter_page(vm_page_t m);
+void moea_quick_remove_page(vm_offset_t addr);
+boolean_t moea_page_is_mapped(vm_page_t m);
+static int moea_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+static int moea_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
-static mmu_method_t moea_methods[] = {
- MMUMETHOD(mmu_clear_modify, moea_clear_modify),
- MMUMETHOD(mmu_copy_page, moea_copy_page),
- MMUMETHOD(mmu_copy_pages, moea_copy_pages),
- MMUMETHOD(mmu_enter, moea_enter),
- MMUMETHOD(mmu_enter_object, moea_enter_object),
- MMUMETHOD(mmu_enter_quick, moea_enter_quick),
- MMUMETHOD(mmu_extract, moea_extract),
- MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold),
- MMUMETHOD(mmu_init, moea_init),
- MMUMETHOD(mmu_is_modified, moea_is_modified),
- MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable),
- MMUMETHOD(mmu_is_referenced, moea_is_referenced),
- MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
- MMUMETHOD(mmu_map, moea_map),
- MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
- MMUMETHOD(mmu_page_init, moea_page_init),
- MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
- MMUMETHOD(mmu_pinit, moea_pinit),
- MMUMETHOD(mmu_pinit0, moea_pinit0),
- MMUMETHOD(mmu_protect, moea_protect),
- MMUMETHOD(mmu_qenter, moea_qenter),
- MMUMETHOD(mmu_qremove, moea_qremove),
- MMUMETHOD(mmu_release, moea_release),
- MMUMETHOD(mmu_remove, moea_remove),
- MMUMETHOD(mmu_remove_all, moea_remove_all),
- MMUMETHOD(mmu_remove_write, moea_remove_write),
- MMUMETHOD(mmu_sync_icache, moea_sync_icache),
- MMUMETHOD(mmu_unwire, moea_unwire),
- MMUMETHOD(mmu_zero_page, moea_zero_page),
- MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
- MMUMETHOD(mmu_activate, moea_activate),
- MMUMETHOD(mmu_deactivate, moea_deactivate),
- MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
- MMUMETHOD(mmu_quick_enter_page, moea_quick_enter_page),
- MMUMETHOD(mmu_quick_remove_page, moea_quick_remove_page),
- MMUMETHOD(mmu_page_is_mapped, moea_page_is_mapped),
+static struct pmap_funcs moea_methods = {
+ .clear_modify = moea_clear_modify,
+ .copy_page = moea_copy_page,
+ .copy_pages = moea_copy_pages,
+ .enter = moea_enter,
+ .enter_object = moea_enter_object,
+ .enter_quick = moea_enter_quick,
+ .extract = moea_extract,
+ .extract_and_hold = moea_extract_and_hold,
+ .init = moea_init,
+ .is_modified = moea_is_modified,
+ .is_prefaultable = moea_is_prefaultable,
+ .is_referenced = moea_is_referenced,
+ .ts_referenced = moea_ts_referenced,
+ .map = moea_map,
+ .page_exists_quick = moea_page_exists_quick,
+ .page_init = moea_page_init,
+ .page_wired_mappings = moea_page_wired_mappings,
+ .pinit = moea_pinit,
+ .pinit0 = moea_pinit0,
+ .protect = moea_protect,
+ .qenter = moea_qenter,
+ .qremove = moea_qremove,
+ .release = moea_release,
+ .remove = moea_remove,
+ .remove_all = moea_remove_all,
+ .remove_write = moea_remove_write,
+ .sync_icache = moea_sync_icache,
+ .unwire = moea_unwire,
+ .zero_page = moea_zero_page,
+ .zero_page_area = moea_zero_page_area,
+ .activate = moea_activate,
+ .deactivate = moea_deactivate,
+ .page_set_memattr = moea_page_set_memattr,
+ .quick_enter_page = moea_quick_enter_page,
+ .quick_remove_page = moea_quick_remove_page,
+ .page_is_mapped = moea_page_is_mapped,
/* Internal interfaces */
- MMUMETHOD(mmu_bootstrap, moea_bootstrap),
- MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap),
- MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr),
- MMUMETHOD(mmu_mapdev, moea_mapdev),
- MMUMETHOD(mmu_unmapdev, moea_unmapdev),
- MMUMETHOD(mmu_kextract, moea_kextract),
- MMUMETHOD(mmu_kenter, moea_kenter),
- MMUMETHOD(mmu_kenter_attr, moea_kenter_attr),
- MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
- MMUMETHOD(mmu_scan_init, moea_scan_init),
- MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map),
- MMUMETHOD(mmu_map_user_ptr, moea_map_user_ptr),
- MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
-
- { 0, 0 }
+ .bootstrap = moea_bootstrap,
+ .cpu_bootstrap = moea_cpu_bootstrap,
+ .mapdev_attr = moea_mapdev_attr,
+ .mapdev = moea_mapdev,
+ .unmapdev = moea_unmapdev,
+ .kextract = moea_kextract,
+ .kenter = moea_kenter,
+ .kenter_attr = moea_kenter_attr,
+ .dev_direct_mapped = moea_dev_direct_mapped,
+ .dumpsys_pa_init = moea_scan_init,
+ .dumpsys_map_chunk = moea_dumpsys_map,
+ .map_user_ptr = moea_map_user_ptr,
+ .decode_kernel_ptr = moea_decode_kernel_ptr,
};
-MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
+MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods);
static __inline uint32_t
moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
@@ -627,7 +623,7 @@ om_cmp(const void *a, const void *b)
}
void
-moea_cpu_bootstrap(mmu_t mmup, int ap)
+moea_cpu_bootstrap(int ap)
{
u_int sdr;
int i;
@@ -665,7 +661,7 @@ moea_cpu_bootstrap(mmu_t mmup, int ap)
}
void
-moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
ihandle_t mmui;
phandle_t chosen, mmu;
@@ -921,7 +917,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
/* Enter the pages */
for (off = 0; off < translations[i].om_len;
off += PAGE_SIZE)
- moea_kenter(mmup, translations[i].om_va + off,
+ moea_kenter(translations[i].om_va + off,
translations[i].om_pa + off);
}
}
@@ -933,7 +929,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
;
Maxmem = powerpc_btop(phys_avail[i + 1]);
- moea_cpu_bootstrap(mmup,0);
+ moea_cpu_bootstrap(0);
mtmsr(mfmsr() | PSL_DR | PSL_IR);
pmap_bootstrapped++;
@@ -954,7 +950,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
thread0.td_kstack = va;
thread0.td_kstack_pages = kstack_pages;
for (i = 0; i < kstack_pages; i++) {
- moea_kenter(mmup, va, pa);
+ moea_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -967,7 +963,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
va = virtual_avail;
virtual_avail += round_page(msgbufsize);
while (va < virtual_avail) {
- moea_kenter(mmup, va, pa);
+ moea_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -980,7 +976,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
va = virtual_avail;
virtual_avail += DPCPU_SIZE;
while (va < virtual_avail) {
- moea_kenter(mmup, va, pa);
+ moea_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -992,7 +988,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
* space can be accessed in any way.
*/
void
-moea_activate(mmu_t mmu, struct thread *td)
+moea_activate(struct thread *td)
{
pmap_t pm, pmr;
@@ -1010,7 +1006,7 @@ moea_activate(mmu_t mmu, struct thread *td)
}
void
-moea_deactivate(mmu_t mmu, struct thread *td)
+moea_deactivate(struct thread *td)
{
pmap_t pm;
@@ -1020,7 +1016,7 @@ moea_deactivate(mmu_t mmu, struct thread *td)
}
void
-moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct pvo_entry key, *pvo;
@@ -1038,7 +1034,7 @@ moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
}
void
-moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+moea_copy_page(vm_page_t msrc, vm_page_t mdst)
{
vm_offset_t dst;
vm_offset_t src;
@@ -1050,7 +1046,7 @@ moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
}
void
-moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@@ -1077,7 +1073,7 @@ moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
* Zero a page of physical memory by temporarily mapping it into the tlb.
*/
void
-moea_zero_page(mmu_t mmu, vm_page_t m)
+moea_zero_page(vm_page_t m)
{
vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);
@@ -1086,7 +1082,7 @@ moea_zero_page(mmu_t mmu, vm_page_t m)
}
void
-moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+moea_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t pa = VM_PAGE_TO_PHYS(m);
void *va = (void *)(pa + off);
@@ -1095,19 +1091,19 @@ moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
}
vm_offset_t
-moea_quick_enter_page(mmu_t mmu, vm_page_t m)
+moea_quick_enter_page(vm_page_t m)
{
return (VM_PAGE_TO_PHYS(m));
}
void
-moea_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+moea_quick_remove_page(vm_offset_t addr)
{
}
boolean_t
-moea_page_is_mapped(mmu_t mmu, vm_page_t m)
+moea_page_is_mapped(vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
}
@@ -1118,7 +1114,7 @@ moea_page_is_mapped(mmu_t mmu, vm_page_t m)
* will be wired down.
*/
int
-moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind)
{
int error;
@@ -1216,7 +1212,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* corresponding offset from m_start are mapped.
*/
void
-moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
+moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
vm_page_t m;
@@ -1239,7 +1235,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
}
void
-moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
+moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
@@ -1252,7 +1248,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
}
vm_paddr_t
-moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
+moea_extract(pmap_t pm, vm_offset_t va)
{
struct pvo_entry *pvo;
vm_paddr_t pa;
@@ -1273,7 +1269,7 @@ moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
* protection.
*/
vm_page_t
-moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
struct pvo_entry *pvo;
vm_page_t m;
@@ -1293,7 +1289,7 @@ moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
}
void
-moea_init(mmu_t mmu)
+moea_init()
{
moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
@@ -1306,7 +1302,7 @@ moea_init(mmu_t mmu)
}
boolean_t
-moea_is_referenced(mmu_t mmu, vm_page_t m)
+moea_is_referenced(vm_page_t m)
{
boolean_t rv;
@@ -1319,7 +1315,7 @@ moea_is_referenced(mmu_t mmu, vm_page_t m)
}
boolean_t
-moea_is_modified(mmu_t mmu, vm_page_t m)
+moea_is_modified(vm_page_t m)
{
boolean_t rv;
@@ -1339,7 +1335,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
}
boolean_t
-moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
struct pvo_entry *pvo;
boolean_t rv;
@@ -1352,7 +1348,7 @@ moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
}
void
-moea_clear_modify(mmu_t mmu, vm_page_t m)
+moea_clear_modify(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -1370,7 +1366,7 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
* Clear the write and modified bits in each of the given page's mappings.
*/
void
-moea_remove_write(mmu_t mmu, vm_page_t m)
+moea_remove_write(vm_page_t m)
{
struct pvo_entry *pvo;
struct pte *pt;
@@ -1425,7 +1421,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
* optimal aging of shared pages.
*/
int
-moea_ts_referenced(mmu_t mmu, vm_page_t m)
+moea_ts_referenced(vm_page_t m)
{
int count;
@@ -1441,7 +1437,7 @@ moea_ts_referenced(mmu_t mmu, vm_page_t m)
* Modify the WIMG settings of all mappings for a page.
*/
void
-moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
struct pvo_head *pvo_head;
@@ -1481,14 +1477,14 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
* Map a wired page into kernel virtual address space.
*/
void
-moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+moea_kenter(vm_offset_t va, vm_paddr_t pa)
{
- moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+ moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}
void
-moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
u_int pte_lo;
int error;
@@ -1517,7 +1513,7 @@ moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
* address.
*/
vm_paddr_t
-moea_kextract(mmu_t mmu, vm_offset_t va)
+moea_kextract(vm_offset_t va)
{
struct pvo_entry *pvo;
vm_paddr_t pa;
@@ -1541,10 +1537,10 @@ moea_kextract(mmu_t mmu, vm_offset_t va)
* Remove a wired page from kernel virtual address space.
*/
void
-moea_kremove(mmu_t mmu, vm_offset_t va)
+moea_kremove(vm_offset_t va)
{
- moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
+ moea_remove(kernel_pmap, va, va + PAGE_SIZE);
}
/*
@@ -1553,7 +1549,7 @@ moea_kremove(mmu_t mmu, vm_offset_t va)
* called in this thread. This is used internally in copyin/copyout.
*/
int
-moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
void **kaddr, size_t ulen, size_t *klen)
{
size_t l;
@@ -1592,7 +1588,7 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
* address space.
*/
static int
-moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
vm_offset_t *decoded_addr)
{
vm_offset_t user_sr;
@@ -1621,7 +1617,7 @@ moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
* first usable address after the mapped region.
*/
vm_offset_t
-moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
vm_offset_t sva, va;
@@ -1629,7 +1625,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
sva = *virt;
va = sva;
for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
- moea_kenter(mmu, va, pa_start);
+ moea_kenter(va, pa_start);
*virt = va;
return (sva);
}
@@ -1642,7 +1638,7 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
* subset of pmaps for proper page aging.
*/
boolean_t
-moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+moea_page_exists_quick(pmap_t pmap, vm_page_t m)
{
int loops;
struct pvo_entry *pvo;
@@ -1666,7 +1662,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
}
void
-moea_page_init(mmu_t mmu __unused, vm_page_t m)
+moea_page_init(vm_page_t m)
{
m->md.mdpg_attrs = 0;
@@ -1679,7 +1675,7 @@ moea_page_init(mmu_t mmu __unused, vm_page_t m)
* that are wired.
*/
int
-moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
+moea_page_wired_mappings(vm_page_t m)
{
struct pvo_entry *pvo;
int count;
@@ -1697,8 +1693,8 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
static u_int moea_vsidcontext;
-void
-moea_pinit(mmu_t mmu, pmap_t pmap)
+int
+moea_pinit(pmap_t pmap)
{
int i, mask;
u_int entropy;
@@ -1708,7 +1704,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
entropy = 0;
__asm __volatile("mftb %0" : "=r"(entropy));
- if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
+ if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap))
== NULL) {
pmap->pmap_phys = pmap;
}
@@ -1752,7 +1748,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
for (i = 0; i < 16; i++)
pmap->pm_sr[i] = VSID_MAKE(i, hash);
mtx_unlock(&moea_vsid_mutex);
- return;
+ return (1);
}
mtx_unlock(&moea_vsid_mutex);
@@ -1763,11 +1759,11 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
* Initialize the pmap associated with process 0.
*/
void
-moea_pinit0(mmu_t mmu, pmap_t pm)
+moea_pinit0(pmap_t pm)
{
PMAP_LOCK_INIT(pm);
- moea_pinit(mmu, pm);
+ moea_pinit(pm);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
@@ -1775,7 +1771,7 @@ moea_pinit0(mmu_t mmu, pmap_t pm)
* Set the physical protection on the specified range of this map as requested.
*/
void
-moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
+moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
struct pvo_entry *pvo, *tpvo, key;
@@ -1785,7 +1781,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
("moea_protect: non current pmap"));
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
- moea_remove(mmu, pm, sva, eva);
+ moea_remove(pm, sva, eva);
return;
}
@@ -1825,13 +1821,13 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
* references recorded. Existing mappings in the region are overwritten.
*/
void
-moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
+moea_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
va = sva;
while (count-- > 0) {
- moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
+ moea_kenter(va, VM_PAGE_TO_PHYS(*m));
va += PAGE_SIZE;
m++;
}
@@ -1842,19 +1838,19 @@ moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
* temporary mappings entered by moea_qenter.
*/
void
-moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
+moea_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;
va = sva;
while (count-- > 0) {
- moea_kremove(mmu, va);
+ moea_kremove(va);
va += PAGE_SIZE;
}
}
void
-moea_release(mmu_t mmu, pmap_t pmap)
+moea_release(pmap_t pmap)
{
int idx, mask;
@@ -1876,7 +1872,7 @@ moea_release(mmu_t mmu, pmap_t pmap)
* Remove the given range of addresses from the specified map.
*/
void
-moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct pvo_entry *pvo, *tpvo, key;
@@ -1897,7 +1893,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
* will reflect changes in pte's back to the vm_page.
*/
void
-moea_remove_all(mmu_t mmu, vm_page_t m)
+moea_remove_all(vm_page_t m)
{
struct pvo_head *pvo_head;
struct pvo_entry *pvo, *next_pvo;
@@ -2600,7 +2596,7 @@ moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
}
boolean_t
-moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{
int i;
@@ -2623,14 +2619,14 @@ moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
* NOT real memory.
*/
void *
-moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea_mapdev(vm_paddr_t pa, vm_size_t size)
{
- return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+ return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}
void *
-moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;
int i;
@@ -2654,7 +2650,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
panic("moea_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
- moea_kenter_attr(mmu, tmpva, ppa, ma);
+ moea_kenter_attr(tmpva, ppa, ma);
tlbie(tmpva);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
@@ -2665,7 +2661,7 @@ moea_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
}
void
-moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+moea_unmapdev(vm_offset_t va, vm_size_t size)
{
vm_offset_t base, offset;
@@ -2682,7 +2678,7 @@ moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
}
static void
-moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
struct pvo_entry *pvo;
vm_offset_t lim;
@@ -2706,7 +2702,7 @@ moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
}
void
-moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
+moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
{
*va = (void *)pa;
@@ -2715,7 +2711,7 @@ moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
void
-moea_scan_init(mmu_t mmu)
+moea_scan_init()
{
struct pvo_entry *pvo;
vm_offset_t va;
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index a8f84d49d8d8..d3703496d027 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$");
#include <machine/_inttypes.h>
#include <machine/cpu.h>
+#include <machine/ifunc.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
@@ -96,11 +97,9 @@ __FBSDID("$FreeBSD$");
#include <machine/mmuvar.h>
#include "mmu_oea64.h"
-#include "mmu_if.h"
-#include "moea64_if.h"
void moea64_release_vsid(uint64_t vsid);
-uintptr_t moea64_get_unique_vsid(void);
+uintptr_t moea64_get_unique_vsid(void);
#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr) mtmsr(msr)
@@ -111,7 +110,7 @@ uintptr_t moea64_get_unique_vsid(void);
/*
* Locking semantics:
- *
+ *
* There are two locks of interest: the page locks and the pmap locks, which
* protect their individual PVO lists and are locked in that order. The contents
* of all PVO entries are protected by the locks of their respective pmaps.
@@ -122,7 +121,7 @@ uintptr_t moea64_get_unique_vsid(void);
#define PV_LOCK_COUNT PA_LOCK_COUNT
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
-
+
/*
* Cheap NUMA-izing of the pv locks, to reduce contention across domains.
* NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
@@ -184,7 +183,7 @@ uma_zone_t moea64_pvo_zone; /* zone for pvo entries */
static struct pvo_entry *moea64_bpvo_pool;
static int moea64_bpvo_pool_index = 0;
static int moea64_bpvo_pool_size = 0;
-SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
+SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
&moea64_bpvo_pool_index, 0, "");
#define BPVO_POOL_SIZE 327680 /* Sensible historical default value */
@@ -210,11 +209,11 @@ u_int moea64_pte_overflow = 0;
u_int moea64_pvo_entries = 0;
u_int moea64_pvo_enter_calls = 0;
u_int moea64_pvo_remove_calls = 0;
-SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
&moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
&moea64_pte_overflow, 0, "");
-SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
+SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
&moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
&moea64_pvo_enter_calls, 0, "");
@@ -233,149 +232,147 @@ int moea64_large_page_shift = 0;
/*
* PVO calls.
*/
-static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
+static int moea64_pvo_enter(struct pvo_entry *pvo,
struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
-static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
-static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
-static void moea64_pvo_remove_from_page_locked(mmu_t mmu,
+static void moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
+static void moea64_pvo_remove_from_page(struct pvo_entry *pvo);
+static void moea64_pvo_remove_from_page_locked(
struct pvo_entry *pvo, vm_page_t m);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
/*
* Utility routines.
*/
-static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t);
-static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
-static void moea64_kremove(mmu_t, vm_offset_t);
-static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
+static boolean_t moea64_query_bit(vm_page_t, uint64_t);
+static u_int moea64_clear_bit(vm_page_t, uint64_t);
+static void moea64_kremove(vm_offset_t);
+static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
vm_paddr_t pa, vm_size_t sz);
static void moea64_pmap_init_qpages(void);
/*
* Kernel MMU interface
*/
-void moea64_clear_modify(mmu_t, vm_page_t);
-void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
-void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+void moea64_clear_modify(vm_page_t);
+void moea64_copy_page(vm_page_t, vm_page_t);
+void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
u_int flags, int8_t psind);
-void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
-void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void moea64_init(mmu_t);
-boolean_t moea64_is_modified(mmu_t, vm_page_t);
-boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t moea64_is_referenced(mmu_t, vm_page_t);
-int moea64_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void moea64_page_init(mmu_t, vm_page_t);
-int moea64_page_wired_mappings(mmu_t, vm_page_t);
-void moea64_pinit(mmu_t, pmap_t);
-void moea64_pinit0(mmu_t, pmap_t);
-void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void moea64_qremove(mmu_t, vm_offset_t, int);
-void moea64_release(mmu_t, pmap_t);
-void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea64_remove_pages(mmu_t, pmap_t);
-void moea64_remove_all(mmu_t, vm_page_t);
-void moea64_remove_write(mmu_t, vm_page_t);
-void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void moea64_zero_page(mmu_t, vm_page_t);
-void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
-void moea64_activate(mmu_t, struct thread *);
-void moea64_deactivate(mmu_t, struct thread *);
-void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
-void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
-void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
-void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
-void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
+void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t moea64_extract(pmap_t, vm_offset_t);
+vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void moea64_init(void);
+boolean_t moea64_is_modified(vm_page_t);
+boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t moea64_is_referenced(vm_page_t);
+int moea64_ts_referenced(vm_page_t);
+vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+boolean_t moea64_page_exists_quick(pmap_t, vm_page_t);
+void moea64_page_init(vm_page_t);
+int moea64_page_wired_mappings(vm_page_t);
+int moea64_pinit(pmap_t);
+void moea64_pinit0(pmap_t);
+void moea64_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void moea64_qenter(vm_offset_t, vm_page_t *, int);
+void moea64_qremove(vm_offset_t, int);
+void moea64_release(pmap_t);
+void moea64_remove(pmap_t, vm_offset_t, vm_offset_t);
+void moea64_remove_pages(pmap_t);
+void moea64_remove_all(vm_page_t);
+void moea64_remove_write(vm_page_t);
+void moea64_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void moea64_zero_page(vm_page_t);
+void moea64_zero_page_area(vm_page_t, int, int);
+void moea64_activate(struct thread *);
+void moea64_deactivate(struct thread *);
+void *moea64_mapdev(vm_paddr_t, vm_size_t);
+void *moea64_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+void moea64_unmapdev(vm_offset_t, vm_size_t);
+vm_paddr_t moea64_kextract(vm_offset_t);
+void moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma);
+void moea64_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
+void moea64_kenter(vm_offset_t, vm_paddr_t);
+boolean_t moea64_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t);
+void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
void **va);
-void moea64_scan_init(mmu_t mmu);
-vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
-void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
-boolean_t moea64_page_is_mapped(mmu_t mmu, vm_page_t m);
-static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
+void moea64_scan_init(void);
+vm_offset_t moea64_quick_enter_page(vm_page_t m);
+void moea64_quick_remove_page(vm_offset_t addr);
+boolean_t moea64_page_is_mapped(vm_page_t m);
+static int moea64_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+static int moea64_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
-static size_t moea64_scan_pmap(mmu_t mmu);
-static void *moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs);
+static size_t moea64_scan_pmap(void);
+static void *moea64_dump_pmap_init(unsigned blkpgs);
#ifdef __powerpc64__
-static void moea64_page_array_startup(mmu_t, long);
+static void moea64_page_array_startup(long);
#endif
-static mmu_method_t moea64_methods[] = {
- MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
- MMUMETHOD(mmu_copy_page, moea64_copy_page),
- MMUMETHOD(mmu_copy_pages, moea64_copy_pages),
- MMUMETHOD(mmu_enter, moea64_enter),
- MMUMETHOD(mmu_enter_object, moea64_enter_object),
- MMUMETHOD(mmu_enter_quick, moea64_enter_quick),
- MMUMETHOD(mmu_extract, moea64_extract),
- MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
- MMUMETHOD(mmu_init, moea64_init),
- MMUMETHOD(mmu_is_modified, moea64_is_modified),
- MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable),
- MMUMETHOD(mmu_is_referenced, moea64_is_referenced),
- MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced),
- MMUMETHOD(mmu_map, moea64_map),
- MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
- MMUMETHOD(mmu_page_init, moea64_page_init),
- MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
- MMUMETHOD(mmu_pinit, moea64_pinit),
- MMUMETHOD(mmu_pinit0, moea64_pinit0),
- MMUMETHOD(mmu_protect, moea64_protect),
- MMUMETHOD(mmu_qenter, moea64_qenter),
- MMUMETHOD(mmu_qremove, moea64_qremove),
- MMUMETHOD(mmu_release, moea64_release),
- MMUMETHOD(mmu_remove, moea64_remove),
- MMUMETHOD(mmu_remove_pages, moea64_remove_pages),
- MMUMETHOD(mmu_remove_all, moea64_remove_all),
- MMUMETHOD(mmu_remove_write, moea64_remove_write),
- MMUMETHOD(mmu_sync_icache, moea64_sync_icache),
- MMUMETHOD(mmu_unwire, moea64_unwire),
- MMUMETHOD(mmu_zero_page, moea64_zero_page),
- MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area),
- MMUMETHOD(mmu_activate, moea64_activate),
- MMUMETHOD(mmu_deactivate, moea64_deactivate),
- MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
- MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
- MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
- MMUMETHOD(mmu_page_is_mapped, moea64_page_is_mapped),
+static struct pmap_funcs moea64_methods = {
+ .clear_modify = moea64_clear_modify,
+ .copy_page = moea64_copy_page,
+ .copy_pages = moea64_copy_pages,
+ .enter = moea64_enter,
+ .enter_object = moea64_enter_object,
+ .enter_quick = moea64_enter_quick,
+ .extract = moea64_extract,
+ .extract_and_hold = moea64_extract_and_hold,
+ .init = moea64_init,
+ .is_modified = moea64_is_modified,
+ .is_prefaultable = moea64_is_prefaultable,
+ .is_referenced = moea64_is_referenced,
+ .ts_referenced = moea64_ts_referenced,
+ .map = moea64_map,
+ .page_exists_quick = moea64_page_exists_quick,
+ .page_init = moea64_page_init,
+ .page_wired_mappings = moea64_page_wired_mappings,
+ .pinit = moea64_pinit,
+ .pinit0 = moea64_pinit0,
+ .protect = moea64_protect,
+ .qenter = moea64_qenter,
+ .qremove = moea64_qremove,
+ .release = moea64_release,
+ .remove = moea64_remove,
+ .remove_pages = moea64_remove_pages,
+ .remove_all = moea64_remove_all,
+ .remove_write = moea64_remove_write,
+ .sync_icache = moea64_sync_icache,
+ .unwire = moea64_unwire,
+ .zero_page = moea64_zero_page,
+ .zero_page_area = moea64_zero_page_area,
+ .activate = moea64_activate,
+ .deactivate = moea64_deactivate,
+ .page_set_memattr = moea64_page_set_memattr,
+ .quick_enter_page = moea64_quick_enter_page,
+ .quick_remove_page = moea64_quick_remove_page,
+ .page_is_mapped = moea64_page_is_mapped,
#ifdef __powerpc64__
- MMUMETHOD(mmu_page_array_startup, moea64_page_array_startup),
+ .page_array_startup = moea64_page_array_startup,
#endif
/* Internal interfaces */
- MMUMETHOD(mmu_mapdev, moea64_mapdev),
- MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
- MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
- MMUMETHOD(mmu_kextract, moea64_kextract),
- MMUMETHOD(mmu_kenter, moea64_kenter),
- MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
- MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
- MMUMETHOD(mmu_scan_init, moea64_scan_init),
- MMUMETHOD(mmu_scan_pmap, moea64_scan_pmap),
- MMUMETHOD(mmu_dump_pmap_init, moea64_dump_pmap_init),
- MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
- MMUMETHOD(mmu_map_user_ptr, moea64_map_user_ptr),
- MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
-
- { 0, 0 }
+ .mapdev = moea64_mapdev,
+ .mapdev_attr = moea64_mapdev_attr,
+ .unmapdev = moea64_unmapdev,
+ .kextract = moea64_kextract,
+ .kenter = moea64_kenter,
+ .kenter_attr = moea64_kenter_attr,
+ .dev_direct_mapped = moea64_dev_direct_mapped,
+ .dumpsys_pa_init = moea64_scan_init,
+ .dumpsys_scan_pmap = moea64_scan_pmap,
+ .dumpsys_dump_pmap_init = moea64_dump_pmap_init,
+ .dumpsys_map_chunk = moea64_dumpsys_map,
+ .map_user_ptr = moea64_map_user_ptr,
+ .decode_kernel_ptr = moea64_decode_kernel_ptr,
};
-MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
+MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods);
static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
@@ -523,7 +520,7 @@ om_cmp(const void *a, const void *b)
}
static void
-moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
+moea64_add_ofw_mappings(phandle_t mmu, size_t sz)
{
struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
pcell_t acells, trans_cells[sz/sizeof(cell_t)];
@@ -590,7 +587,7 @@ moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
if (pvo != NULL)
continue;
- moea64_kenter(mmup, translations[i].om_va + off,
+ moea64_kenter(translations[i].om_va + off,
pa_base + off);
}
ENABLE_TRANS(msr);
@@ -649,7 +646,7 @@ moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
#endif
static int
-moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
+moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
{
struct pvo_entry *pvo;
uint64_t pte_lo;
@@ -665,14 +662,14 @@ moea64_kenter_large(mmu_t mmup, vm_offset_t va, vm_paddr_t pa, uint64_t attr, in
pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
VM_PROT_EXECUTE;
pvo->pvo_pte.pa = pa | pte_lo;
- error = moea64_pvo_enter(mmup, pvo, NULL, NULL);
+ error = moea64_pvo_enter(pvo, NULL, NULL);
if (error != 0)
panic("Error %d inserting large page\n", error);
return (0);
}
static void
-moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
+moea64_setup_direct_map(vm_offset_t kernelstart,
vm_offset_t kernelend)
{
register_t msr;
@@ -681,7 +678,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
uint64_t pte_lo;
int i;
- if (moea64_large_page_size == 0)
+ if (moea64_large_page_size == 0)
hw_direct_map = 0;
DISABLE_TRANS(msr);
@@ -699,7 +696,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
pregions[i].mr_start + pregions[i].mr_size)
pte_lo |= LPTE_G;
- moea64_kenter_large(mmup, PHYS_TO_DMAP(pa), pa, pte_lo, 1);
+ moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1);
}
}
PMAP_UNLOCK(kernel_pmap);
@@ -718,24 +715,24 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
*/
for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
pa += PAGE_SIZE)
- moea64_kenter(mmup, pa, pa);
+ moea64_kenter(pa, pa);
} else if (!hw_direct_map) {
pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
pa += PAGE_SIZE)
- moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
+ moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
}
if (!hw_direct_map) {
size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
off = (vm_offset_t)(moea64_bpvo_pool);
for (pa = off; pa < off + size; pa += PAGE_SIZE)
- moea64_kenter(mmup, pa, pa);
+ moea64_kenter(pa, pa);
/* Map exception vectors */
for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
- moea64_kenter(mmup, pa | DMAP_BASE_ADDRESS, pa);
+ moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
}
ENABLE_TRANS(msr);
@@ -763,7 +760,7 @@ pa_cmp(const void *a, const void *b)
}
void
-moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
int i, j;
vm_size_t physsz, hwphyssz;
@@ -899,7 +896,7 @@ moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelen
}
void
-moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
int i;
@@ -960,7 +957,7 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
pcpup->pc_aim.slb[i].slbe = 0;
}
#else
- for (i = 0; i < 16; i++)
+ for (i = 0; i < 16; i++)
kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
#endif
@@ -974,11 +971,11 @@ moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
* Now map in all the other buffers we allocated earlier
*/
- moea64_setup_direct_map(mmup, kernelstart, kernelend);
+ moea64_setup_direct_map(kernelstart, kernelend);
}
void
-moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
ihandle_t mmui;
phandle_t chosen;
@@ -1003,7 +1000,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
panic("moea64_bootstrap: too many ofw translations");
if (sz > 0)
- moea64_add_ofw_mappings(mmup, mmu, sz);
+ moea64_add_ofw_mappings(mmu, sz);
}
/*
@@ -1016,7 +1013,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
/*
* Initialize MMU.
*/
- MMU_CPU_BOOTSTRAP(mmup,0);
+ pmap_cpu_bootstrap(0);
mtmsr(mfmsr() | PSL_DR | PSL_IR);
pmap_bootstrapped++;
@@ -1066,7 +1063,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
thread0.td_kstack = va;
thread0.td_kstack_pages = kstack_pages;
for (i = 0; i < kstack_pages; i++) {
- moea64_kenter(mmup, va, pa);
+ moea64_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -1079,7 +1076,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
va = virtual_avail;
virtual_avail += round_page(msgbufsize);
while (va < virtual_avail) {
- moea64_kenter(mmup, va, pa);
+ moea64_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -1092,7 +1089,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
va = virtual_avail;
virtual_avail += DPCPU_SIZE;
while (va < virtual_avail) {
- moea64_kenter(mmup, va, pa);
+ moea64_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -1115,7 +1112,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
virtual_end -= PAGE_SIZE;
- moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
+ moea64_kenter(moea64_scratchpage_va[i], 0);
PMAP_LOCK(kernel_pmap);
moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
@@ -1156,7 +1153,7 @@ SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
* state.
*/
void
-moea64_activate(mmu_t mmu, struct thread *td)
+moea64_activate(struct thread *td)
{
pmap_t pm;
@@ -1174,7 +1171,7 @@ moea64_activate(mmu_t mmu, struct thread *td)
}
void
-moea64_deactivate(mmu_t mmu, struct thread *td)
+moea64_deactivate(struct thread *td)
{
pmap_t pm;
@@ -1190,7 +1187,7 @@ moea64_deactivate(mmu_t mmu, struct thread *td)
}
void
-moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct pvo_entry key, *pvo;
vm_page_t m;
@@ -1205,7 +1202,7 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
panic("moea64_unwire: pvo %p is missing PVO_WIRED",
pvo);
pvo->pvo_vaddr &= ~PVO_WIRED;
- refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
+ refchg = moea64_pte_replace(pvo, 0 /* No invalidation */);
if ((pvo->pvo_vaddr & PVO_MANAGED) &&
(pvo->pvo_pte.prot & VM_PROT_WRITE)) {
if (refchg < 0)
@@ -1231,7 +1228,7 @@ moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
*/
static __inline
-void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
+void moea64_set_scratchpage_pa(int which, vm_paddr_t pa)
{
struct pvo_entry *pvo;
@@ -1242,13 +1239,13 @@ void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa)
PMAP_LOCK(pvo->pvo_pmap);
pvo->pvo_pte.pa =
moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
- MOEA64_PTE_REPLACE(mmup, pvo, MOEA64_PTE_INVALIDATE);
+ moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
PMAP_UNLOCK(pvo->pvo_pmap);
isync();
}
void
-moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+moea64_copy_page(vm_page_t msrc, vm_page_t mdst)
{
vm_offset_t dst;
vm_offset_t src;
@@ -1262,10 +1259,10 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
} else {
mtx_lock(&moea64_scratchpage_mtx);
- moea64_set_scratchpage_pa(mmu, 0, src);
- moea64_set_scratchpage_pa(mmu, 1, dst);
+ moea64_set_scratchpage_pa(0, src);
+ moea64_set_scratchpage_pa(1, dst);
- bcopy((void *)moea64_scratchpage_va[0],
+ bcopy((void *)moea64_scratchpage_va[0],
(void *)moea64_scratchpage_va[1], PAGE_SIZE);
mtx_unlock(&moea64_scratchpage_mtx);
@@ -1273,7 +1270,7 @@ moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
}
static inline void
-moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@@ -1299,7 +1296,7 @@ moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}
static inline void
-moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea64_copy_pages_nodmap(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@@ -1310,12 +1307,12 @@ moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
- moea64_set_scratchpage_pa(mmu, 0,
+ moea64_set_scratchpage_pa(0,
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
- moea64_set_scratchpage_pa(mmu, 1,
+ moea64_set_scratchpage_pa(1,
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
@@ -1327,21 +1324,21 @@ moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}
void
-moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
if (hw_direct_map) {
- moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
+ moea64_copy_pages_dmap(ma, a_offset, mb, b_offset,
xfersize);
} else {
- moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
+ moea64_copy_pages_nodmap(ma, a_offset, mb, b_offset,
xfersize);
}
}
void
-moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+moea64_zero_page_area(vm_page_t m, int off, int size)
{
vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
@@ -1352,7 +1349,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
} else {
mtx_lock(&moea64_scratchpage_mtx);
- moea64_set_scratchpage_pa(mmu, 0, pa);
+ moea64_set_scratchpage_pa(0, pa);
bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
mtx_unlock(&moea64_scratchpage_mtx);
}
@@ -1362,7 +1359,7 @@ moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
* Zero a page of physical memory by temporarily mapping it
*/
void
-moea64_zero_page(mmu_t mmu, vm_page_t m)
+moea64_zero_page(vm_page_t m)
{
vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
vm_offset_t va, off;
@@ -1370,7 +1367,7 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
if (!hw_direct_map) {
mtx_lock(&moea64_scratchpage_mtx);
- moea64_set_scratchpage_pa(mmu, 0, pa);
+ moea64_set_scratchpage_pa(0, pa);
va = moea64_scratchpage_va[0];
} else {
va = PHYS_TO_DMAP(pa);
@@ -1384,7 +1381,7 @@ moea64_zero_page(mmu_t mmu, vm_page_t m)
}
vm_offset_t
-moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
+moea64_quick_enter_page(vm_page_t m)
{
struct pvo_entry *pvo;
vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
@@ -1406,14 +1403,14 @@ moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
mtx_lock(PCPU_PTR(aim.qmap_lock));
pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
(uint64_t)pa;
- MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
+ moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
isync();
return (PCPU_GET(qmap_addr));
}
void
-moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+moea64_quick_remove_page(vm_offset_t addr)
{
if (hw_direct_map)
return;
@@ -1426,7 +1423,7 @@ moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
}
boolean_t
-moea64_page_is_mapped(mmu_t mmu, vm_page_t m)
+moea64_page_is_mapped(vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
}
@@ -1438,7 +1435,7 @@ moea64_page_is_mapped(mmu_t mmu, vm_page_t m)
*/
int
-moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind)
{
struct pvo_entry *pvo, *oldpvo;
@@ -1481,7 +1478,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
(m->oflags & VPO_UNMANAGED) == 0)
vm_page_aflag_set(m, PGA_WRITEABLE);
- error = moea64_pvo_enter(mmu, pvo, pvo_head, &oldpvo);
+ error = moea64_pvo_enter(pvo, pvo_head, &oldpvo);
if (error == EEXIST) {
if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
@@ -1490,9 +1487,9 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
error = 0;
/* If not in page table, reinsert it */
- if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
+ if (moea64_pte_synch(oldpvo) < 0) {
STAT_MOEA64(moea64_pte_overflow--);
- MOEA64_PTE_INSERT(mmu, oldpvo);
+ moea64_pte_insert(oldpvo);
}
/* Then just clean up and go home */
@@ -1504,8 +1501,8 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
/* Otherwise, need to kill it first */
KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
"mapping does not match new mapping"));
- moea64_pvo_remove_from_pmap(mmu, oldpvo);
- moea64_pvo_enter(mmu, pvo, pvo_head, NULL);
+ moea64_pvo_remove_from_pmap(oldpvo);
+ moea64_pvo_enter(pvo, pvo_head, NULL);
}
}
PMAP_UNLOCK(pmap);
@@ -1513,7 +1510,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
/* Free any dead pages */
if (error == EEXIST) {
- moea64_pvo_remove_from_page(mmu, oldpvo);
+ moea64_pvo_remove_from_page(oldpvo);
free_pvo_entry(oldpvo);
}
@@ -1525,13 +1522,13 @@ out:
if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
(pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
vm_page_aflag_set(m, PGA_EXECUTABLE);
- moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
+ moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
return (KERN_SUCCESS);
}
static void
-moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
vm_size_t sz)
{
@@ -1559,8 +1556,8 @@ moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
mtx_lock(&moea64_scratchpage_mtx);
- moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
- __syncicache((void *)(moea64_scratchpage_va[1] +
+ moea64_set_scratchpage_pa(1, pa & ~ADDR_POFF);
+ __syncicache((void *)(moea64_scratchpage_va[1] +
(va & ADDR_POFF)), sz);
mtx_unlock(&moea64_scratchpage_mtx);
@@ -1580,7 +1577,7 @@ moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
* corresponding offset from m_start are mapped.
*/
void
-moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
+moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
vm_page_t m_start, vm_prot_t prot)
{
vm_page_t m;
@@ -1591,7 +1588,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
psize = atop(end - start);
m = m_start;
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
+ moea64_enter(pm, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
PMAP_ENTER_QUICK_LOCKED, 0);
m = TAILQ_NEXT(m, listq);
@@ -1599,16 +1596,16 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
}
void
-moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
+moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
- moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+ moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
}
vm_paddr_t
-moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
+moea64_extract(pmap_t pm, vm_offset_t va)
{
struct pvo_entry *pvo;
vm_paddr_t pa;
@@ -1630,11 +1627,11 @@ moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
* protection.
*/
vm_page_t
-moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+moea64_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
struct pvo_entry *pvo;
vm_page_t m;
-
+
m = NULL;
PMAP_LOCK(pmap);
pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
@@ -1647,8 +1644,6 @@ moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
return (m);
}
-static mmu_t installed_mmu;
-
static void *
moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
uint8_t *flags, int wait)
@@ -1686,7 +1681,7 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
init_pvo_entry(pvo, kernel_pmap, va);
pvo->pvo_vaddr |= PVO_WIRED;
- moea64_pvo_enter(installed_mmu, pvo, NULL, NULL);
+ moea64_pvo_enter(pvo, NULL, NULL);
if (needed_lock)
PMAP_UNLOCK(kernel_pmap);
@@ -1700,7 +1695,7 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
extern int elf32_nxstack;
void
-moea64_init(mmu_t mmu)
+moea64_init()
{
CTR0(KTR_PMAP, "moea64_init");
@@ -1710,7 +1705,6 @@ moea64_init(mmu_t mmu)
UMA_ZONE_VM | UMA_ZONE_NOFREE);
if (!hw_direct_map) {
- installed_mmu = mmu;
uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
}
@@ -1722,17 +1716,17 @@ moea64_init(mmu_t mmu)
}
boolean_t
-moea64_is_referenced(mmu_t mmu, vm_page_t m)
+moea64_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_is_referenced: page %p is not managed", m));
- return (moea64_query_bit(mmu, m, LPTE_REF));
+ return (moea64_query_bit(m, LPTE_REF));
}
boolean_t
-moea64_is_modified(mmu_t mmu, vm_page_t m)
+moea64_is_modified(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -1744,11 +1738,11 @@ moea64_is_modified(mmu_t mmu, vm_page_t m)
if (!pmap_page_is_write_mapped(m))
return (FALSE);
- return (moea64_query_bit(mmu, m, LPTE_CHG));
+ return (moea64_query_bit(m, LPTE_CHG));
}
boolean_t
-moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+moea64_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
struct pvo_entry *pvo;
boolean_t rv = TRUE;
@@ -1762,7 +1756,7 @@ moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
}
void
-moea64_clear_modify(mmu_t mmu, vm_page_t m)
+moea64_clear_modify(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -1771,14 +1765,14 @@ moea64_clear_modify(mmu_t mmu, vm_page_t m)
if (!pmap_page_is_write_mapped(m))
return;
- moea64_clear_bit(mmu, m, LPTE_CHG);
+ moea64_clear_bit(m, LPTE_CHG);
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
-moea64_remove_write(mmu_t mmu, vm_page_t m)
+moea64_remove_write(vm_page_t m)
{
struct pvo_entry *pvo;
int64_t refchg, ret;
@@ -1800,8 +1794,7 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
if (!(pvo->pvo_vaddr & PVO_DEAD) &&
(pvo->pvo_pte.prot & VM_PROT_WRITE)) {
pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
- ret = MOEA64_PTE_REPLACE(mmu, pvo,
- MOEA64_PTE_PROT_UPDATE);
+ ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
if (ret < 0)
ret = LPTE_CHG;
refchg |= ret;
@@ -1829,19 +1822,19 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
* optimal aging of shared pages.
*/
int
-moea64_ts_referenced(mmu_t mmu, vm_page_t m)
+moea64_ts_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_ts_referenced: page %p is not managed", m));
- return (moea64_clear_bit(mmu, m, LPTE_REF));
+ return (moea64_clear_bit(m, LPTE_REF));
}
/*
* Modify the WIMG settings of all mappings for a page.
*/
void
-moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
int64_t refchg;
@@ -1862,8 +1855,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
if (!(pvo->pvo_vaddr & PVO_DEAD)) {
pvo->pvo_pte.pa &= ~LPTE_WIMG;
pvo->pvo_pte.pa |= lo;
- refchg = MOEA64_PTE_REPLACE(mmu, pvo,
- MOEA64_PTE_INVALIDATE);
+ refchg = moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
if (refchg < 0)
refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
LPTE_CHG : 0;
@@ -1889,7 +1881,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
* Map a wired page into kernel virtual address space.
*/
void
-moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
int error;
struct pvo_entry *pvo, *oldpvo;
@@ -1906,14 +1898,14 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
PMAP_LOCK(kernel_pmap);
oldpvo = moea64_pvo_find_va(kernel_pmap, va);
if (oldpvo != NULL)
- moea64_pvo_remove_from_pmap(mmu, oldpvo);
+ moea64_pvo_remove_from_pmap(oldpvo);
init_pvo_entry(pvo, kernel_pmap, va);
- error = moea64_pvo_enter(mmu, pvo, NULL, NULL);
+ error = moea64_pvo_enter(pvo, NULL, NULL);
PMAP_UNLOCK(kernel_pmap);
/* Free any dead pages */
if (oldpvo != NULL) {
- moea64_pvo_remove_from_page(mmu, oldpvo);
+ moea64_pvo_remove_from_page(oldpvo);
free_pvo_entry(oldpvo);
}
@@ -1923,10 +1915,10 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
}
void
-moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+moea64_kenter(vm_offset_t va, vm_paddr_t pa)
{
- moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+ moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}
/*
@@ -1934,7 +1926,7 @@ moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
* address.
*/
vm_paddr_t
-moea64_kextract(mmu_t mmu, vm_offset_t va)
+moea64_kextract(vm_offset_t va)
{
struct pvo_entry *pvo;
vm_paddr_t pa;
@@ -1960,9 +1952,9 @@ moea64_kextract(mmu_t mmu, vm_offset_t va)
* Remove a wired page from kernel virtual address space.
*/
void
-moea64_kremove(mmu_t mmu, vm_offset_t va)
+moea64_kremove(vm_offset_t va)
{
- moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
+ moea64_remove(kernel_pmap, va, va + PAGE_SIZE);
}
/*
@@ -1971,7 +1963,7 @@ moea64_kremove(mmu_t mmu, vm_offset_t va)
* called in this thread. This is used internally in copyin/copyout.
*/
static int
-moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
void **kaddr, size_t ulen, size_t *klen)
{
size_t l;
@@ -2014,7 +2006,7 @@ moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
/* If we have already set this VSID, we can just return */
if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
return (0);
-
+
__asm __volatile("isync");
curthread->td_pcb->pcb_cpu.aim.usr_segm =
(uintptr_t)uaddr >> ADDR_SR_SHFT;
@@ -2035,7 +2027,7 @@ moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
* address space.
*/
static int
-moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user,
vm_offset_t *decoded_addr)
{
vm_offset_t user_sr;
@@ -2064,7 +2056,7 @@ moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
* update '*virt' with the first usable address after the mapped region.
*/
vm_offset_t
-moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
vm_offset_t sva, va;
@@ -2086,7 +2078,7 @@ moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
va = sva;
/* XXX respect prot argument */
for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
- moea64_kenter(mmu, va, pa_start);
+ moea64_kenter(va, pa_start);
*virt = va;
return (sva);
@@ -2100,7 +2092,7 @@ moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
* subset of pmaps for proper page aging.
*/
boolean_t
-moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+moea64_page_exists_quick(pmap_t pmap, vm_page_t m)
{
int loops;
struct pvo_entry *pvo;
@@ -2124,7 +2116,7 @@ moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
}
void
-moea64_page_init(mmu_t mmu __unused, vm_page_t m)
+moea64_page_init(vm_page_t m)
{
m->md.mdpg_attrs = 0;
@@ -2137,7 +2129,7 @@ moea64_page_init(mmu_t mmu __unused, vm_page_t m)
* that are wired.
*/
int
-moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
+moea64_page_wired_mappings(vm_page_t m)
{
struct pvo_entry *pvo;
int count;
@@ -2208,8 +2200,8 @@ moea64_get_unique_vsid(void) {
}
#ifdef __powerpc64__
-void
-moea64_pinit(mmu_t mmu, pmap_t pmap)
+int
+moea64_pinit(pmap_t pmap)
{
RB_INIT(&pmap->pmap_pvo);
@@ -2217,10 +2209,12 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
pmap->pm_slb_tree_root = slb_alloc_tree();
pmap->pm_slb = slb_alloc_user_cache();
pmap->pm_slb_len = 0;
+
+ return (1);
}
#else
-void
-moea64_pinit(mmu_t mmu, pmap_t pmap)
+int
+moea64_pinit(pmap_t pmap)
{
int i;
uint32_t hash;
@@ -2228,8 +2222,7 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
RB_INIT(&pmap->pmap_pvo);
if (pmap_bootstrapped)
- pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
- (vm_offset_t)pmap);
+ pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap);
else
pmap->pmap_phys = pmap;
@@ -2238,10 +2231,12 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
*/
hash = moea64_get_unique_vsid();
- for (i = 0; i < 16; i++)
+ for (i = 0; i < 16; i++)
pmap->pm_sr[i] = VSID_MAKE(i, hash);
KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
+
+ return (1);
}
#endif
@@ -2249,11 +2244,11 @@ moea64_pinit(mmu_t mmu, pmap_t pmap)
* Initialize the pmap associated with process 0.
*/
void
-moea64_pinit0(mmu_t mmu, pmap_t pm)
+moea64_pinit0(pmap_t pm)
{
PMAP_LOCK_INIT(pm);
- moea64_pinit(mmu, pm);
+ moea64_pinit(pm);
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
@@ -2261,7 +2256,7 @@ moea64_pinit0(mmu_t mmu, pmap_t pm)
* Set the physical protection on the specified range of this map as requested.
*/
static void
-moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
+moea64_pvo_protect(pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
struct vm_page *pg;
vm_prot_t oldprot;
@@ -2279,7 +2274,7 @@ moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
/*
* If the PVO is in the page table, update mapping
*/
- refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
+ refchg = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
if (refchg < 0)
refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
@@ -2288,7 +2283,7 @@ moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
(pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
if ((pg->oflags & VPO_UNMANAGED) == 0)
vm_page_aflag_set(pg, PGA_EXECUTABLE);
- moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
+ moea64_syncicache(pm, PVO_VADDR(pvo),
pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
}
@@ -2307,7 +2302,7 @@ moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
}
void
-moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
+moea64_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
struct pvo_entry *pvo, *tpvo, key;
@@ -2319,7 +2314,7 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
("moea64_protect: non current pmap"));
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
- moea64_remove(mmu, pm, sva, eva);
+ moea64_remove(pm, sva, eva);
return;
}
@@ -2328,7 +2323,7 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
- moea64_pvo_protect(mmu, pm, pvo, prot);
+ moea64_pvo_protect(pm, pvo, prot);
}
PMAP_UNLOCK(pm);
}
@@ -2339,10 +2334,10 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
* references recorded. Existing mappings in the region are overwritten.
*/
void
-moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
+moea64_qenter(vm_offset_t va, vm_page_t *m, int count)
{
while (count-- > 0) {
- moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
+ moea64_kenter(va, VM_PAGE_TO_PHYS(*m));
va += PAGE_SIZE;
m++;
}
@@ -2353,10 +2348,10 @@ moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
* temporary mappings entered by moea64_qenter.
*/
void
-moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
+moea64_qremove(vm_offset_t va, int count)
{
while (count-- > 0) {
- moea64_kremove(mmu, va);
+ moea64_kremove(va);
va += PAGE_SIZE;
}
}
@@ -2378,9 +2373,9 @@ moea64_release_vsid(uint64_t vsid)
void
-moea64_release(mmu_t mmu, pmap_t pmap)
+moea64_release(pmap_t pmap)
{
-
+
/*
* Free segment registers' VSIDs
*/
@@ -2398,7 +2393,7 @@ moea64_release(mmu_t mmu, pmap_t pmap)
* Remove all pages mapped by the specified pmap
*/
void
-moea64_remove_pages(mmu_t mmu, pmap_t pm)
+moea64_remove_pages(pmap_t pm)
{
struct pvo_entry *pvo, *tpvo;
struct pvo_dlist tofree;
@@ -2415,7 +2410,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm)
* pmap, but save delinking from the vm_page for a second
* pass
*/
- moea64_pvo_remove_from_pmap(mmu, pvo);
+ moea64_pvo_remove_from_pmap(pvo);
SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
}
PMAP_UNLOCK(pm);
@@ -2423,7 +2418,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm)
while (!SLIST_EMPTY(&tofree)) {
pvo = SLIST_FIRST(&tofree);
SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
- moea64_pvo_remove_from_page(mmu, pvo);
+ moea64_pvo_remove_from_page(pvo);
free_pvo_entry(pvo);
}
}
@@ -2432,7 +2427,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm)
* Remove the given range of addresses from the specified map.
*/
void
-moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
+moea64_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct pvo_entry *pvo, *tpvo, key;
struct pvo_dlist tofree;
@@ -2457,7 +2452,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
* pmap, but save delinking from the vm_page for a second
* pass
*/
- moea64_pvo_remove_from_pmap(mmu, pvo);
+ moea64_pvo_remove_from_pmap(pvo);
SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
}
PMAP_UNLOCK(pm);
@@ -2465,7 +2460,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
while (!SLIST_EMPTY(&tofree)) {
pvo = SLIST_FIRST(&tofree);
SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
- moea64_pvo_remove_from_page(mmu, pvo);
+ moea64_pvo_remove_from_page(pvo);
free_pvo_entry(pvo);
}
}
@@ -2475,7 +2470,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
* will reflect changes in pte's back to the vm_page.
*/
void
-moea64_remove_all(mmu_t mmu, vm_page_t m)
+moea64_remove_all(vm_page_t m)
{
struct pvo_entry *pvo, *next_pvo;
struct pvo_head freequeue;
@@ -2490,8 +2485,8 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
PMAP_LOCK(pmap);
wasdead = (pvo->pvo_vaddr & PVO_DEAD);
if (!wasdead)
- moea64_pvo_remove_from_pmap(mmu, pvo);
- moea64_pvo_remove_from_page_locked(mmu, pvo, m);
+ moea64_pvo_remove_from_pmap(pvo);
+ moea64_pvo_remove_from_page_locked(pvo, m);
if (!wasdead)
LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
PMAP_UNLOCK(pmap);
@@ -2553,7 +2548,7 @@ moea64_bootstrap_alloc(vm_size_t size, vm_size_t align)
}
static int
-moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
+moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head,
struct pvo_entry **oldpvop)
{
struct pvo_entry *old_pvo;
@@ -2585,7 +2580,7 @@ moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
/*
* Insert it into the hardware page table
*/
- err = MOEA64_PTE_INSERT(mmu, pvo);
+ err = moea64_pte_insert(pvo);
if (err != 0) {
panic("moea64_pvo_enter: overflow");
}
@@ -2609,7 +2604,7 @@ moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head,
}
static void
-moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
+moea64_pvo_remove_from_pmap(struct pvo_entry *pvo)
{
struct vm_page *pg;
int32_t refchg;
@@ -2621,7 +2616,7 @@ moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
/*
* If there is an active pte entry, we need to deactivate it
*/
- refchg = MOEA64_PTE_UNSET(mmu, pvo);
+ refchg = moea64_pte_unset(pvo);
if (refchg < 0) {
/*
* If it was evicted from the page table, be pessimistic and
@@ -2665,7 +2660,7 @@ moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
}
static inline void
-moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
+moea64_pvo_remove_from_page_locked(struct pvo_entry *pvo,
vm_page_t m)
{
@@ -2694,7 +2689,7 @@ moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
}
static void
-moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
+moea64_pvo_remove_from_page(struct pvo_entry *pvo)
{
vm_page_t pg = NULL;
@@ -2702,7 +2697,7 @@ moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
- moea64_pvo_remove_from_page_locked(mmu, pvo, pg);
+ moea64_pvo_remove_from_page_locked(pvo, pg);
PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
}
@@ -2718,7 +2713,7 @@ moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
}
static boolean_t
-moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
+moea64_query_bit(vm_page_t m, uint64_t ptebit)
{
struct pvo_entry *pvo;
int64_t ret;
@@ -2747,7 +2742,7 @@ moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
*/
PMAP_LOCK(pvo->pvo_pmap);
if (!(pvo->pvo_vaddr & PVO_DEAD))
- ret = MOEA64_PTE_SYNCH(mmu, pvo);
+ ret = moea64_pte_synch(pvo);
PMAP_UNLOCK(pvo->pvo_pmap);
if (ret > 0) {
@@ -2765,7 +2760,7 @@ moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
}
static u_int
-moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
+moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
{
u_int count;
struct pvo_entry *pvo;
@@ -2787,7 +2782,7 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
PMAP_LOCK(pvo->pvo_pmap);
if (!(pvo->pvo_vaddr & PVO_DEAD))
- ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
+ ret = moea64_pte_clear(pvo, ptebit);
PMAP_UNLOCK(pvo->pvo_pmap);
if (ret > 0 && (ret & ptebit))
@@ -2800,7 +2795,7 @@ moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
}
boolean_t
-moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea64_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{
struct pvo_entry *pvo, key;
vm_offset_t ppa;
@@ -2832,7 +2827,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
* NOT real memory.
*/
void *
-moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+moea64_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;
@@ -2846,7 +2841,7 @@ moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
- moea64_kenter_attr(mmu, tmpva, ppa, ma);
+ moea64_kenter_attr(tmpva, ppa, ma);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
ppa += PAGE_SIZE;
@@ -2856,14 +2851,14 @@ moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
}
void *
-moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+moea64_mapdev(vm_paddr_t pa, vm_size_t size)
{
- return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
+ return moea64_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT);
}
void
-moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+moea64_unmapdev(vm_offset_t va, vm_size_t size)
{
vm_offset_t base, offset;
@@ -2875,7 +2870,7 @@ moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
}
void
-moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+moea64_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
struct pvo_entry *pvo;
vm_offset_t lim;
@@ -2892,7 +2887,7 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
- moea64_syncicache(mmu, pm, va, pa, len);
+ moea64_syncicache(pm, va, pa, len);
}
va += len;
sz -= len;
@@ -2901,7 +2896,7 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
}
void
-moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
+moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
{
*va = (void *)(uintptr_t)pa;
@@ -2910,7 +2905,7 @@ moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
void
-moea64_scan_init(mmu_t mmu)
+moea64_scan_init()
{
struct pvo_entry *pvo;
vm_offset_t va;
@@ -2973,7 +2968,7 @@ moea64_scan_init(mmu_t mmu)
#ifdef __powerpc64__
static size_t
-moea64_scan_pmap(mmu_t mmu)
+moea64_scan_pmap()
{
struct pvo_entry *pvo;
vm_paddr_t pa, pa_end;
@@ -3031,7 +3026,7 @@ moea64_scan_pmap(mmu_t mmu)
static struct dump_context dump_ctx;
static void *
-moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
+moea64_dump_pmap_init(unsigned blkpgs)
{
dump_ctx.ptex = 0;
dump_ctx.ptex_end = moea64_pteg_count * 8;
@@ -3042,13 +3037,13 @@ moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
#else
static size_t
-moea64_scan_pmap(mmu_t mmu)
+moea64_scan_pmap()
{
return (0);
}
static void *
-moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
+moea64_dump_pmap_init(unsigned blkpgs)
{
return (NULL);
}
@@ -3057,7 +3052,7 @@ moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
#ifdef __powerpc64__
static void
-moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
+moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
{
for (; npages > 0; --npages) {
@@ -3066,13 +3061,13 @@ moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
(va & moea64_large_page_mask) == 0 &&
npages >= (moea64_large_page_size >> PAGE_SHIFT)) {
PMAP_LOCK(kernel_pmap);
- moea64_kenter_large(mmu, va, pa, 0, 0);
+ moea64_kenter_large(va, pa, 0, 0);
PMAP_UNLOCK(kernel_pmap);
pa += moea64_large_page_size;
va += moea64_large_page_size;
npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1;
} else {
- moea64_kenter(mmu, va, pa);
+ moea64_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -3080,7 +3075,7 @@ moea64_map_range(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
}
static void
-moea64_page_array_startup(mmu_t mmu, long pages)
+moea64_page_array_startup(long pages)
{
long dom_pages[MAXMEMDOM];
vm_paddr_t pa;
@@ -3096,7 +3091,7 @@ moea64_page_array_startup(mmu_t mmu, long pages)
if (vm_ndomains == 1) {
size = round_page(pages * sizeof(struct vm_page));
pa = vm_phys_early_alloc(0, size);
- vm_page_base = moea64_map(mmu, &vm_page_base,
+ vm_page_base = moea64_map(&vm_page_base,
pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
vm_page_array_size = pages;
vm_page_array = (vm_page_t)vm_page_base;
@@ -3140,7 +3135,7 @@ moea64_page_array_startup(mmu_t mmu, long pages)
size = roundup2(size, moea64_large_page_size);
pa = vm_phys_early_alloc(i, size);
vm_page_array_size += size / sizeof(struct vm_page);
- moea64_map_range(mmu, va, pa, size >> PAGE_SHIFT);
+ moea64_map_range(va, pa, size >> PAGE_SHIFT);
/* Scoot up domain 0, to reduce the domain page overlap. */
if (i == 0)
vm_page_base += size - needed;
@@ -3150,3 +3145,38 @@ moea64_page_array_startup(mmu_t mmu, long pages)
vm_page_array_size = pages;
}
#endif
+
+static int64_t
+moea64_null_method(void)
+{
+ return (0);
+}
+
+static int64_t moea64_pte_replace_default(struct pvo_entry *pvo, int flags)
+{
+ int64_t refchg;
+
+ refchg = moea64_pte_unset(pvo);
+ moea64_pte_insert(pvo);
+
+ return (refchg);
+}
+
+struct moea64_funcs *moea64_ops;
+
+#define DEFINE_OEA64_IFUNC(ret, func, args, def) \
+ DEFINE_IFUNC(, ret, moea64_##func, args) { \
+ moea64_##func##_t f; \
+ if (moea64_ops == NULL) \
+ return ((moea64_##func##_t)def); \
+ f = moea64_ops->func; \
+ return (f != NULL ? f : (moea64_##func##_t)def);\
+ }
+
+DEFINE_OEA64_IFUNC(int64_t, pte_replace, (struct pvo_entry *, int),
+ moea64_pte_replace_default)
+DEFINE_OEA64_IFUNC(int64_t, pte_insert, (struct pvo_entry *), moea64_null_method)
+DEFINE_OEA64_IFUNC(int64_t, pte_unset, (struct pvo_entry *), moea64_null_method)
+DEFINE_OEA64_IFUNC(int64_t, pte_clear, (struct pvo_entry *, uint64_t),
+ moea64_null_method)
+DEFINE_OEA64_IFUNC(int64_t, pte_synch, (struct pvo_entry *), moea64_null_method)
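The DEFINE_OEA64_IFUNC block above is the replacement dispatch for the MOEA64 backend hooks: each generated resolver returns whatever function the backend installed in moea64_ops, and falls back to moea64_null_method (or moea64_pte_replace_default for pte_replace) when no ops table is set or the slot is NULL. The standalone C sketch below shows the same ops-table-with-default-fallback pattern in miniature; every name in it (struct ops, backend_pte_insert, and so on) is illustrative only and not part of this patch.

#include <stddef.h>
#include <stdio.h>

struct pvo_entry;                       /* opaque, as in the kernel */
typedef long (*pte_insert_t)(struct pvo_entry *);

struct ops {
	pte_insert_t	pte_insert;
};

static struct ops *ops;                 /* set once by the chosen backend */

static long
null_method(struct pvo_entry *pvo)
{
	(void)pvo;
	return (0);                     /* same role as moea64_null_method */
}

/* Roughly what each DEFINE_OEA64_IFUNC resolver does, minus the ifunc machinery. */
static pte_insert_t
resolve_pte_insert(void)
{
	if (ops == NULL || ops->pte_insert == NULL)
		return (null_method);
	return (ops->pte_insert);
}

static long
backend_pte_insert(struct pvo_entry *pvo)
{
	(void)pvo;
	return (1);
}

static struct ops backend_ops = { .pte_insert = backend_pte_insert };

int
main(void)
{
	printf("%ld\n", resolve_pte_insert()(NULL));    /* no backend: prints 0 */
	ops = &backend_ops;
	printf("%ld\n", resolve_pte_insert()(NULL));    /* backend installed: prints 1 */
	return (0);
}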
diff --git a/sys/powerpc/aim/mmu_oea64.h b/sys/powerpc/aim/mmu_oea64.h
index 06183703949f..e529475e93e9 100644
--- a/sys/powerpc/aim/mmu_oea64.h
+++ b/sys/powerpc/aim/mmu_oea64.h
@@ -32,6 +32,7 @@
#include "opt_pmap.h"
+#include <vm/vm_extern.h>
#include <machine/mmuvar.h>
struct dump_context {
@@ -40,7 +41,7 @@ struct dump_context {
size_t blksz;
};
-extern mmu_def_t oea64_mmu;
+extern const struct mmu_kobj oea64_mmu;
/*
* Helper routines
@@ -69,13 +70,36 @@ void moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte);
* moea64_late_bootstrap();
*/
-void moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_early_bootstrap(vm_offset_t kernelstart,
vm_offset_t kernelend);
-void moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_mid_bootstrap(vm_offset_t kernelstart,
vm_offset_t kernelend);
-void moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+void moea64_late_bootstrap(vm_offset_t kernelstart,
vm_offset_t kernelend);
+int64_t moea64_pte_replace(struct pvo_entry *, int);
+int64_t moea64_pte_insert(struct pvo_entry *);
+int64_t moea64_pte_unset(struct pvo_entry *);
+int64_t moea64_pte_clear(struct pvo_entry *, uint64_t);
+int64_t moea64_pte_synch(struct pvo_entry *);
+
+
+typedef int64_t (*moea64_pte_replace_t)(struct pvo_entry *, int);
+typedef int64_t (*moea64_pte_insert_t)(struct pvo_entry *);
+typedef int64_t (*moea64_pte_unset_t)(struct pvo_entry *);
+typedef int64_t (*moea64_pte_clear_t)(struct pvo_entry *, uint64_t);
+typedef int64_t (*moea64_pte_synch_t)(struct pvo_entry *);
+
+struct moea64_funcs {
+ moea64_pte_replace_t pte_replace;
+ moea64_pte_insert_t pte_insert;
+ moea64_pte_unset_t pte_unset;
+ moea64_pte_clear_t pte_clear;
+ moea64_pte_synch_t pte_synch;
+};
+
+extern struct moea64_funcs *moea64_ops;
+
static inline uint64_t
moea64_pte_vpn_from_pvo_vpn(const struct pvo_entry *pvo)
{
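The moea64_funcs table and the moea64_ops pointer declared in this header are the new hook point for MOEA64 backends: a backend publishes its PTE primitives by filling in the structure and pointing moea64_ops at it, presumably before the moea64_* ifunc resolvers run. A minimal sketch of such a backend follows; the mybackend_* names are hypothetical and only illustrate the shape of the registration, they do not appear anywhere in this patch.

#include "mmu_oea64.h"

static int64_t	mybackend_pte_insert(struct pvo_entry *);
static int64_t	mybackend_pte_unset(struct pvo_entry *);
static int64_t	mybackend_pte_clear(struct pvo_entry *, uint64_t);
static int64_t	mybackend_pte_synch(struct pvo_entry *);

static struct moea64_funcs mybackend_funcs = {
	.pte_insert	= mybackend_pte_insert,
	.pte_unset	= mybackend_pte_unset,
	.pte_clear	= mybackend_pte_clear,
	.pte_synch	= mybackend_pte_synch,
	/* .pte_replace left NULL: the unset+insert default is used instead. */
};

static void
mybackend_install(void)
{
	/* Hypothetical install hook; must run before the ifuncs resolve. */
	moea64_ops = &mybackend_funcs;
}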
diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c
index 1d010e89bf77..0bcd544ad630 100644
--- a/sys/powerpc/aim/mmu_radix.c
+++ b/sys/powerpc/aim/mmu_radix.c
@@ -406,152 +406,145 @@ static u_int64_t KPTphys; /* phys addr of kernel level 1 */
static vm_offset_t qframe = 0;
static struct mtx qframe_mtx;
-void mmu_radix_activate(mmu_t mmu, struct thread *);
-void mmu_radix_advise(mmu_t mmu, pmap_t, vm_offset_t, vm_offset_t, int);
-void mmu_radix_align_superpage(mmu_t mmu, vm_object_t, vm_ooffset_t, vm_offset_t *,
+void mmu_radix_activate(struct thread *);
+void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int);
+void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,
vm_size_t);
-void mmu_radix_clear_modify(mmu_t, vm_page_t);
-void mmu_radix_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
+void mmu_radix_clear_modify(vm_page_t);
+void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+int mmu_radix_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-int mmu_radix_decode_kernel_ptr(mmu_t, vm_offset_t, int *, vm_offset_t *);
-int mmu_radix_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
-void mmu_radix_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *);
+int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t);
+void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
-void mmu_radix_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t mmu_radix_extract(mmu_t, pmap_t pmap, vm_offset_t va);
-vm_page_t mmu_radix_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void mmu_radix_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-vm_paddr_t mmu_radix_kextract(mmu_t, vm_offset_t);
-void mmu_radix_kremove(mmu_t, vm_offset_t);
-boolean_t mmu_radix_is_modified(mmu_t, vm_page_t);
-boolean_t mmu_radix_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t mmu_radix_is_referenced(mmu_t, vm_page_t);
-void mmu_radix_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
+void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va);
+vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
+void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
+vm_paddr_t mmu_radix_kextract(vm_offset_t);
+void mmu_radix_kremove(vm_offset_t);
+boolean_t mmu_radix_is_modified(vm_page_t);
+boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
+boolean_t mmu_radix_is_referenced(vm_page_t);
+void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
vm_pindex_t, vm_size_t);
-boolean_t mmu_radix_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void mmu_radix_page_init(mmu_t, vm_page_t);
-boolean_t mmu_radix_page_is_mapped(mmu_t, vm_page_t m);
-void mmu_radix_page_set_memattr(mmu_t, vm_page_t, vm_memattr_t);
-int mmu_radix_page_wired_mappings(mmu_t, vm_page_t);
-void mmu_radix_pinit(mmu_t, pmap_t);
-void mmu_radix_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-boolean_t mmu_radix_ps_enabled(mmu_t, pmap_t);
-void mmu_radix_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void mmu_radix_qremove(mmu_t, vm_offset_t, int);
-vm_offset_t mmu_radix_quick_enter_page(mmu_t, vm_page_t);
-void mmu_radix_quick_remove_page(mmu_t, vm_offset_t);
-boolean_t mmu_radix_ts_referenced(mmu_t, vm_page_t);
-void mmu_radix_release(mmu_t, pmap_t);
-void mmu_radix_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void mmu_radix_remove_all(mmu_t, vm_page_t);
-void mmu_radix_remove_pages(mmu_t, pmap_t);
-void mmu_radix_remove_write(mmu_t, vm_page_t);
-void mmu_radix_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void mmu_radix_zero_page(mmu_t, vm_page_t);
-void mmu_radix_zero_page_area(mmu_t, vm_page_t, int, int);
-int mmu_radix_change_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
-void mmu_radix_page_array_startup(mmu_t mmu, long pages);
+boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
+void mmu_radix_page_init(vm_page_t);
+boolean_t mmu_radix_page_is_mapped(vm_page_t m);
+void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
+int mmu_radix_page_wired_mappings(vm_page_t);
+int mmu_radix_pinit(pmap_t);
+void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+bool mmu_radix_ps_enabled(pmap_t);
+void mmu_radix_qenter(vm_offset_t, vm_page_t *, int);
+void mmu_radix_qremove(vm_offset_t, int);
+vm_offset_t mmu_radix_quick_enter_page(vm_page_t);
+void mmu_radix_quick_remove_page(vm_offset_t);
+boolean_t mmu_radix_ts_referenced(vm_page_t);
+void mmu_radix_release(pmap_t);
+void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t);
+void mmu_radix_remove_all(vm_page_t);
+void mmu_radix_remove_pages(pmap_t);
+void mmu_radix_remove_write(vm_page_t);
+void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t);
+void mmu_radix_zero_page(vm_page_t);
+void mmu_radix_zero_page_area(vm_page_t, int, int);
+int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
+void mmu_radix_page_array_startup(long pages);
#include "mmu_oea64.h"
-#include "mmu_if.h"
-#include "moea64_if.h"
/*
* Kernel MMU interface
*/
-static void mmu_radix_bootstrap(mmu_t mmup,
- vm_offset_t kernelstart, vm_offset_t kernelend);
+static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t);
-static void mmu_radix_copy_page(mmu_t, vm_page_t, vm_page_t);
-static void mmu_radix_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+static void mmu_radix_copy_page(vm_page_t, vm_page_t);
+static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
-static void mmu_radix_growkernel(mmu_t, vm_offset_t);
-static void mmu_radix_init(mmu_t);
-static int mmu_radix_mincore(mmu_t, pmap_t, vm_offset_t, vm_paddr_t *);
-static vm_offset_t mmu_radix_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
-static void mmu_radix_pinit0(mmu_t, pmap_t);
-
-static void *mmu_radix_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-static void *mmu_radix_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-static void mmu_radix_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-static void mmu_radix_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
-static boolean_t mmu_radix_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
- void **va);
-static void mmu_radix_scan_init(mmu_t mmu);
-static void mmu_radix_cpu_bootstrap(mmu_t, int ap);
-static void mmu_radix_tlbie_all(mmu_t);
-
-static mmu_method_t mmu_radix_methods[] = {
- MMUMETHOD(mmu_bootstrap, mmu_radix_bootstrap),
- MMUMETHOD(mmu_copy_page, mmu_radix_copy_page),
- MMUMETHOD(mmu_copy_pages, mmu_radix_copy_pages),
- MMUMETHOD(mmu_cpu_bootstrap, mmu_radix_cpu_bootstrap),
- MMUMETHOD(mmu_growkernel, mmu_radix_growkernel),
- MMUMETHOD(mmu_init, mmu_radix_init),
- MMUMETHOD(mmu_map, mmu_radix_map),
- MMUMETHOD(mmu_mincore, mmu_radix_mincore),
- MMUMETHOD(mmu_pinit, mmu_radix_pinit),
- MMUMETHOD(mmu_pinit0, mmu_radix_pinit0),
-
- MMUMETHOD(mmu_mapdev, mmu_radix_mapdev),
- MMUMETHOD(mmu_mapdev_attr, mmu_radix_mapdev_attr),
- MMUMETHOD(mmu_unmapdev, mmu_radix_unmapdev),
- MMUMETHOD(mmu_kenter_attr, mmu_radix_kenter_attr),
- MMUMETHOD(mmu_dev_direct_mapped,mmu_radix_dev_direct_mapped),
- MMUMETHOD(mmu_scan_init, mmu_radix_scan_init),
- MMUMETHOD(mmu_dumpsys_map, mmu_radix_dumpsys_map),
- MMUMETHOD(mmu_page_is_mapped, mmu_radix_page_is_mapped),
- MMUMETHOD(mmu_ps_enabled, mmu_radix_ps_enabled),
- MMUMETHOD(mmu_object_init_pt, mmu_radix_object_init_pt),
- MMUMETHOD(mmu_protect, mmu_radix_protect),
+static void mmu_radix_growkernel(vm_offset_t);
+static void mmu_radix_init(void);
+static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
+static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+static void mmu_radix_pinit0(pmap_t);
+
+static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t);
+static void *mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+static void mmu_radix_unmapdev(vm_offset_t, vm_size_t);
+static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
+static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
+static void mmu_radix_scan_init(void);
+static void mmu_radix_cpu_bootstrap(int ap);
+static void mmu_radix_tlbie_all(void);
+
+static struct pmap_funcs mmu_radix_methods = {
+ .bootstrap = mmu_radix_bootstrap,
+ .copy_page = mmu_radix_copy_page,
+ .copy_pages = mmu_radix_copy_pages,
+ .cpu_bootstrap = mmu_radix_cpu_bootstrap,
+ .growkernel = mmu_radix_growkernel,
+ .init = mmu_radix_init,
+ .map = mmu_radix_map,
+ .mincore = mmu_radix_mincore,
+ .pinit = mmu_radix_pinit,
+ .pinit0 = mmu_radix_pinit0,
+
+ .mapdev = mmu_radix_mapdev,
+ .mapdev_attr = mmu_radix_mapdev_attr,
+ .unmapdev = mmu_radix_unmapdev,
+ .kenter_attr = mmu_radix_kenter_attr,
+ .dev_direct_mapped = mmu_radix_dev_direct_mapped,
+ .dumpsys_pa_init = mmu_radix_scan_init,
+ .dumpsys_map_chunk = mmu_radix_dumpsys_map,
+ .page_is_mapped = mmu_radix_page_is_mapped,
+ .ps_enabled = mmu_radix_ps_enabled,
+ .object_init_pt = mmu_radix_object_init_pt,
+ .protect = mmu_radix_protect,
/* pmap dispatcher interface */
- MMUMETHOD(mmu_clear_modify, mmu_radix_clear_modify),
- MMUMETHOD(mmu_copy, mmu_radix_copy),
- MMUMETHOD(mmu_enter, mmu_radix_enter),
- MMUMETHOD(mmu_enter_object, mmu_radix_enter_object),
- MMUMETHOD(mmu_enter_quick, mmu_radix_enter_quick),
- MMUMETHOD(mmu_extract, mmu_radix_extract),
- MMUMETHOD(mmu_extract_and_hold, mmu_radix_extract_and_hold),
- MMUMETHOD(mmu_is_modified, mmu_radix_is_modified),
- MMUMETHOD(mmu_is_prefaultable, mmu_radix_is_prefaultable),
- MMUMETHOD(mmu_is_referenced, mmu_radix_is_referenced),
- MMUMETHOD(mmu_ts_referenced, mmu_radix_ts_referenced),
- MMUMETHOD(mmu_page_exists_quick,mmu_radix_page_exists_quick),
- MMUMETHOD(mmu_page_init, mmu_radix_page_init),
- MMUMETHOD(mmu_page_wired_mappings, mmu_radix_page_wired_mappings),
- MMUMETHOD(mmu_qenter, mmu_radix_qenter),
- MMUMETHOD(mmu_qremove, mmu_radix_qremove),
- MMUMETHOD(mmu_release, mmu_radix_release),
- MMUMETHOD(mmu_remove, mmu_radix_remove),
- MMUMETHOD(mmu_remove_all, mmu_radix_remove_all),
- MMUMETHOD(mmu_remove_write, mmu_radix_remove_write),
- MMUMETHOD(mmu_unwire, mmu_radix_unwire),
- MMUMETHOD(mmu_zero_page, mmu_radix_zero_page),
- MMUMETHOD(mmu_zero_page_area, mmu_radix_zero_page_area),
- MMUMETHOD(mmu_activate, mmu_radix_activate),
- MMUMETHOD(mmu_quick_enter_page, mmu_radix_quick_enter_page),
- MMUMETHOD(mmu_quick_remove_page, mmu_radix_quick_remove_page),
- MMUMETHOD(mmu_page_set_memattr, mmu_radix_page_set_memattr),
- MMUMETHOD(mmu_page_array_startup, mmu_radix_page_array_startup),
+ .clear_modify = mmu_radix_clear_modify,
+ .copy = mmu_radix_copy,
+ .enter = mmu_radix_enter,
+ .enter_object = mmu_radix_enter_object,
+ .enter_quick = mmu_radix_enter_quick,
+ .extract = mmu_radix_extract,
+ .extract_and_hold = mmu_radix_extract_and_hold,
+ .is_modified = mmu_radix_is_modified,
+ .is_prefaultable = mmu_radix_is_prefaultable,
+ .is_referenced = mmu_radix_is_referenced,
+ .ts_referenced = mmu_radix_ts_referenced,
+ .page_exists_quick = mmu_radix_page_exists_quick,
+ .page_init = mmu_radix_page_init,
+ .page_wired_mappings = mmu_radix_page_wired_mappings,
+ .qenter = mmu_radix_qenter,
+ .qremove = mmu_radix_qremove,
+ .release = mmu_radix_release,
+ .remove = mmu_radix_remove,
+ .remove_all = mmu_radix_remove_all,
+ .remove_write = mmu_radix_remove_write,
+ .unwire = mmu_radix_unwire,
+ .zero_page = mmu_radix_zero_page,
+ .zero_page_area = mmu_radix_zero_page_area,
+ .activate = mmu_radix_activate,
+ .quick_enter_page = mmu_radix_quick_enter_page,
+ .quick_remove_page = mmu_radix_quick_remove_page,
+ .page_set_memattr = mmu_radix_page_set_memattr,
+ .page_array_startup = mmu_radix_page_array_startup,
/* Internal interfaces */
- MMUMETHOD(mmu_kenter, mmu_radix_kenter),
- MMUMETHOD(mmu_kextract, mmu_radix_kextract),
- MMUMETHOD(mmu_kremove, mmu_radix_kremove),
- MMUMETHOD(mmu_change_attr, mmu_radix_change_attr),
- MMUMETHOD(mmu_map_user_ptr, mmu_radix_map_user_ptr),
- MMUMETHOD(mmu_decode_kernel_ptr, mmu_radix_decode_kernel_ptr),
-
- MMUMETHOD(mmu_tlbie_all, mmu_radix_tlbie_all),
- { 0, 0 }
+ .kenter = mmu_radix_kenter,
+ .kextract = mmu_radix_kextract,
+ .kremove = mmu_radix_kremove,
+ .change_attr = mmu_radix_change_attr,
+ .map_user_ptr = mmu_radix_map_user_ptr,
+ .decode_kernel_ptr = mmu_radix_decode_kernel_ptr,
+
+ .tlbie_all = mmu_radix_tlbie_all,
};
-MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods, 0);
-
-#define METHODVOID(m) mmu_radix_ ## m(mmu_t mmup)
+MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
struct rwlock **lockp);
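With kobj gone, the method table above is just a designated-initializer struct pmap_funcs handed to the three-argument MMU_DEF. For a hypothetical additional pmap driver the registration would look roughly like the sketch below; the mmu_mydrv name, its methods, and the MMU_TYPE_MYDRV constant are invented for illustration and are not part of this change.

static void	mmu_mydrv_bootstrap(vm_offset_t, vm_offset_t);
static void	mmu_mydrv_kenter(vm_offset_t, vm_paddr_t);
static vm_paddr_t mmu_mydrv_kextract(vm_offset_t);

static struct pmap_funcs mmu_mydrv_methods = {
	.bootstrap	= mmu_mydrv_bootstrap,
	.kenter		= mmu_mydrv_kenter,
	.kextract	= mmu_mydrv_kextract,
};

/* The name constant plays the same role as MMU_TYPE_RADIX above. */
MMU_DEF(mmu_mydrv, MMU_TYPE_MYDRV, mmu_mydrv_methods);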
@@ -778,7 +771,7 @@ mmu_radix_tlbiel_flush(int scope)
}
static void
-mmu_radix_tlbie_all(mmu_t __unused mmu)
+mmu_radix_tlbie_all()
{
/* TODO: LPID invalidate */
mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
@@ -907,7 +900,7 @@ kvtopte(vm_offset_t va)
}
void
-mmu_radix_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa)
{
pt_entry_t *pte;
@@ -917,8 +910,8 @@ mmu_radix_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
RPTE_EAA_P | PG_M | PG_A;
}
-boolean_t
-mmu_radix_ps_enabled(mmu_t mmu, pmap_t pmap)
+bool
+mmu_radix_ps_enabled(pmap_t pmap)
{
return (pg_ps_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0);
}
@@ -1025,7 +1018,7 @@ pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
* a 2mpage. Otherwise, returns FALSE.
*/
boolean_t
-mmu_radix_page_is_mapped(mmu_t mmu, vm_page_t m)
+mmu_radix_page_is_mapped(vm_page_t m)
{
struct rwlock *lock;
boolean_t rv;
@@ -2036,7 +2029,7 @@ mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end)
}
static void
-mmu_radix_late_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
+mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end)
{
int i;
vm_paddr_t pa;
@@ -2079,7 +2072,7 @@ mmu_radix_late_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
thread0.td_kstack = va;
for (i = 0; i < kstack_pages; i++) {
- mmu_radix_kenter(mmu, va, pa);
+ mmu_radix_kenter(va, pa);
pa += PAGE_SIZE;
va += PAGE_SIZE;
}
@@ -2190,7 +2183,7 @@ mmu_radix_proctab_init(void)
}
void
-mmu_radix_advise(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
int advice)
{
struct rwlock *lock;
@@ -2304,7 +2297,7 @@ maybe_invlrng:
* Routines used in machine-dependent code
*/
static void
-mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
+mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end)
{
uint64_t lpcr;
@@ -2328,7 +2321,7 @@ mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
/* XXX assume CPU_FTR_HVMODE */
mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
- mmu_radix_late_bootstrap(mmu, start, end);
+ mmu_radix_late_bootstrap(start, end);
numa_mem_regions(&numa_pregions, &numa_pregions_sz);
if (bootverbose)
printf("%s done\n", __func__);
@@ -2337,7 +2330,7 @@ mmu_radix_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t end)
}
static void
-mmu_radix_cpu_bootstrap(mmu_t mmu, int ap)
+mmu_radix_cpu_bootstrap(int ap)
{
uint64_t lpcr;
uint64_t ptcr;
@@ -2382,7 +2375,7 @@ SYSCTL_ULONG(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD,
&pmap_l2e_demotions, 0, "1GB page demotions");
void
-mmu_radix_clear_modify(mmu_t mmu, vm_page_t m)
+mmu_radix_clear_modify(vm_page_t m)
{
struct md_page *pvh;
pmap_t pmap;
@@ -2477,7 +2470,7 @@ restart:
}
void
-mmu_radix_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
+mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)
{
struct rwlock *lock;
@@ -2634,7 +2627,7 @@ out:
}
static void
-mmu_radix_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
+mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst)
{
vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
@@ -2647,7 +2640,7 @@ mmu_radix_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
}
static void
-mmu_radix_copy_pages(mmu_t mmu, vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
vm_offset_t b_offset, int xfersize)
{
@@ -2767,7 +2760,7 @@ setpte:
#endif /* VM_NRESERVLEVEL > 0 */
int
-mmu_radix_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind)
{
struct rwlock *lock;
@@ -3058,7 +3051,7 @@ unchanged:
* populated, then attempt promotion.
*/
if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
- mmu_radix_ps_enabled(mmu, pmap) &&
+ mmu_radix_ps_enabled(pmap) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0 &&
pmap_promote_l3e(pmap, l3e, va, &lock) == 0)
@@ -3225,7 +3218,7 @@ pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags,
}
void
-mmu_radix_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
+mmu_radix_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
@@ -3248,7 +3241,7 @@ mmu_radix_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end &&
- m->psind == 1 && mmu_radix_ps_enabled(mmu, pmap) &&
+ m->psind == 1 && mmu_radix_ps_enabled(pmap) &&
pmap_enter_2mpage(pmap, va, m, prot, &lock))
m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1];
else
@@ -3372,7 +3365,7 @@ mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
}
void
-mmu_radix_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
struct rwlock *lock;
@@ -3392,7 +3385,7 @@ mmu_radix_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}
vm_paddr_t
-mmu_radix_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+mmu_radix_extract(pmap_t pmap, vm_offset_t va)
{
pml3_entry_t *l3e;
pt_entry_t *pte;
@@ -3424,7 +3417,7 @@ mmu_radix_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
}
vm_page_t
-mmu_radix_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
pml3_entry_t l3e, *l3ep;
pt_entry_t pte;
@@ -3455,7 +3448,7 @@ mmu_radix_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t pro
}
static void
-mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
+mmu_radix_growkernel(vm_offset_t addr)
{
vm_paddr_t paddr;
vm_page_t nkpg;
@@ -3480,7 +3473,7 @@ mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
- mmu_radix_zero_page(mmu, nkpg);
+ mmu_radix_zero_page(nkpg);
paddr = VM_PAGE_TO_PHYS(nkpg);
pde_store(l2e, paddr);
continue; /* try again */
@@ -3501,7 +3494,7 @@ mmu_radix_growkernel(mmu_t mmu, vm_offset_t addr)
if (nkpg == NULL)
panic("pmap_growkernel: no memory to grow kernel");
if ((nkpg->flags & PG_ZERO) == 0)
- mmu_radix_zero_page(mmu, nkpg);
+ mmu_radix_zero_page(nkpg);
paddr = VM_PAGE_TO_PHYS(nkpg);
pde_store(l3e, paddr);
@@ -3559,7 +3552,7 @@ radix_pgd_release(void *arg __unused, void **store, int count)
}
static void
-mmu_radix_init(mmu_t mmu)
+mmu_radix_init()
{
vm_page_t mpte;
vm_size_t s;
@@ -3726,7 +3719,7 @@ out:
* in any physical maps.
*/
boolean_t
-mmu_radix_is_modified(mmu_t mmu, vm_page_t m)
+mmu_radix_is_modified(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
@@ -3742,7 +3735,7 @@ mmu_radix_is_modified(mmu_t mmu, vm_page_t m)
}
boolean_t
-mmu_radix_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
+mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pml3_entry_t *l3e;
pt_entry_t *pte;
@@ -3761,7 +3754,7 @@ mmu_radix_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
}
boolean_t
-mmu_radix_is_referenced(mmu_t mmu, vm_page_t m)
+mmu_radix_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
@@ -3790,7 +3783,7 @@ mmu_radix_is_referenced(mmu_t mmu, vm_page_t m)
* released.
*/
boolean_t
-mmu_radix_ts_referenced(mmu_t mmu, vm_page_t m)
+mmu_radix_ts_referenced(vm_page_t m)
{
struct md_page *pvh;
pv_entry_t pv, pvf;
@@ -3928,7 +3921,7 @@ out:
}
static vm_offset_t
-mmu_radix_map(mmu_t mmu, vm_offset_t *virt __unused, vm_paddr_t start,
+mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start,
vm_paddr_t end, int prot __unused)
{
@@ -3938,7 +3931,7 @@ mmu_radix_map(mmu_t mmu, vm_offset_t *virt __unused, vm_paddr_t start,
}
void
-mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
+mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
pml3_entry_t *l3e;
@@ -3953,7 +3946,7 @@ mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
("pmap_object_init_pt: non-device object"));
/* NB: size can be logically ored with addr here */
if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) {
- if (!mmu_radix_ps_enabled(mmu, pmap))
+ if (!mmu_radix_ps_enabled(pmap))
return;
if (!vm_object_populate(object, pindex, pindex + atop(size)))
return;
@@ -4023,7 +4016,7 @@ mmu_radix_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
}
boolean_t
-mmu_radix_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
struct rwlock *lock;
@@ -4063,7 +4056,7 @@ mmu_radix_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
}
void
-mmu_radix_page_init(mmu_t mmu, vm_page_t m)
+mmu_radix_page_init(vm_page_t m)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
@@ -4072,7 +4065,7 @@ mmu_radix_page_init(mmu_t mmu, vm_page_t m)
}
int
-mmu_radix_page_wired_mappings(mmu_t mmu, vm_page_t m)
+mmu_radix_page_wired_mappings(vm_page_t m)
{
struct rwlock *lock;
struct md_page *pvh;
@@ -4137,8 +4130,8 @@ mmu_radix_update_proctab(int pid, pml1_entry_t l1pa)
isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT);
}
-void
-mmu_radix_pinit(mmu_t mmu, pmap_t pmap)
+int
+mmu_radix_pinit(pmap_t pmap)
{
vmem_addr_t pid;
vm_paddr_t l1pa;
@@ -4162,6 +4155,8 @@ mmu_radix_pinit(mmu_t mmu, pmap_t pmap)
l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1);
mmu_radix_update_proctab(pid, l1pa);
__asm __volatile("ptesync;isync" : : : "memory");
+
+ return (1);
}
/*
@@ -4200,7 +4195,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
return (NULL);
}
if ((m->flags & PG_ZERO) == 0)
- mmu_radix_zero_page(NULL, m);
+ mmu_radix_zero_page(m);
/*
* Map the pagetable page into the process address space, if
@@ -4371,7 +4366,7 @@ retry:
}
static void
-mmu_radix_pinit0(mmu_t mmu, pmap_t pmap)
+mmu_radix_pinit0(pmap_t pmap)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
@@ -4429,7 +4424,7 @@ retry:
}
void
-mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
vm_offset_t va_next;
@@ -4444,7 +4439,7 @@ mmu_radix_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
if (prot == VM_PROT_NONE) {
- mmu_radix_remove(mmu, pmap, sva, eva);
+ mmu_radix_remove(pmap, sva, eva);
return;
}
@@ -4556,7 +4551,7 @@ retry:
}
void
-mmu_radix_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *ma, int count)
+mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count);
@@ -4598,7 +4593,7 @@ mmu_radix_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *ma, int count)
}
void
-mmu_radix_qremove(mmu_t mmu, vm_offset_t sva, int count)
+mmu_radix_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;
pt_entry_t *pte;
@@ -4749,7 +4744,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde,
}
void
-mmu_radix_release(mmu_t mmu, pmap_t pmap)
+mmu_radix_release(pmap_t pmap)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
@@ -5112,7 +5107,7 @@ pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
void
-mmu_radix_remove(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
struct rwlock *lock;
vm_offset_t va_next;
@@ -5229,7 +5224,7 @@ out:
}
void
-mmu_radix_remove_all(mmu_t mmu, vm_page_t m)
+mmu_radix_remove_all(vm_page_t m)
{
struct md_page *pvh;
pv_entry_t pv;
@@ -5337,7 +5332,7 @@ retry:
*/
void
-mmu_radix_remove_pages(mmu_t mmu, pmap_t pmap)
+mmu_radix_remove_pages(pmap_t pmap)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
@@ -5506,7 +5501,7 @@ mmu_radix_remove_pages(mmu_t mmu, pmap_t pmap)
}
void
-mmu_radix_remove_write(mmu_t mmu, vm_page_t m)
+mmu_radix_remove_write(vm_page_t m)
{
struct md_page *pvh;
pmap_t pmap;
@@ -5598,7 +5593,7 @@ retry:
* function are not needed.
*/
void
-mmu_radix_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
vm_offset_t va_next;
pml1_entry_t *l1e;
@@ -5670,7 +5665,7 @@ mmu_radix_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
}
void
-mmu_radix_zero_page(mmu_t mmu, vm_page_t m)
+mmu_radix_zero_page(vm_page_t m)
{
vm_offset_t addr;
@@ -5680,7 +5675,7 @@ mmu_radix_zero_page(mmu_t mmu, vm_page_t m)
}
void
-mmu_radix_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+mmu_radix_zero_page_area(vm_page_t m, int off, int size)
{
caddr_t addr;
@@ -5694,8 +5689,7 @@ mmu_radix_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
static int
-mmu_radix_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
- vm_paddr_t *locked_pa)
+mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
pml3_entry_t *l3ep;
pt_entry_t pte;
@@ -5740,7 +5734,7 @@ mmu_radix_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
}
void
-mmu_radix_activate(mmu_t mmu, struct thread *td)
+mmu_radix_activate(struct thread *td)
{
pmap_t pmap;
uint32_t curpid;
@@ -5761,7 +5755,7 @@ mmu_radix_activate(mmu_t mmu, struct thread *td)
* different alignment might result in more superpage mappings.
*/
void
-mmu_radix_align_superpage(mmu_t mmu, vm_object_t object, vm_ooffset_t offset,
+mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset,
vm_offset_t *addr, vm_size_t size)
{
@@ -5784,7 +5778,7 @@ mmu_radix_align_superpage(mmu_t mmu, vm_object_t object, vm_ooffset_t offset,
}
static void *
-mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
+mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{
vm_offset_t va, tmpva, ppa, offset;
@@ -5803,7 +5797,7 @@ mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t att
panic("%s: Couldn't alloc kernel virtual memory", __func__);
for (tmpva = va; size > 0;) {
- mmu_radix_kenter_attr(mmu, tmpva, ppa, attr);
+ mmu_radix_kenter_attr(tmpva, ppa, attr);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
ppa += PAGE_SIZE;
@@ -5814,16 +5808,16 @@ mmu_radix_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t att
}
static void *
-mmu_radix_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size)
{
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
- return (mmu_radix_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+ return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}
void
-mmu_radix_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
+mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
@@ -5835,13 +5829,13 @@ mmu_radix_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
* required for data coherence.
*/
if ((m->flags & PG_FICTITIOUS) == 0 &&
- mmu_radix_change_attr(mmu, PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
+ mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)),
PAGE_SIZE, m->md.mdpg_cache_attrs))
panic("memory attribute change on the direct map failed");
}
static void
-mmu_radix_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+mmu_radix_unmapdev(vm_offset_t va, vm_size_t size)
{
vm_offset_t offset;
@@ -5929,7 +5923,7 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
}
vm_paddr_t
-mmu_radix_kextract(mmu_t mmu, vm_offset_t va)
+mmu_radix_kextract(vm_offset_t va)
{
pml3_entry_t l3e;
vm_paddr_t pa;
@@ -5980,7 +5974,7 @@ mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
}
static void
-mmu_radix_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
pt_entry_t *pte, pteval;
uint64_t cache_bits;
@@ -5993,7 +5987,7 @@ mmu_radix_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
}
void
-mmu_radix_kremove(mmu_t mmu, vm_offset_t va)
+mmu_radix_kremove(vm_offset_t va)
{
pt_entry_t *pte;
@@ -6003,7 +5997,7 @@ mmu_radix_kremove(mmu_t mmu, vm_offset_t va)
pte_clear(pte);
}
-int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
+int mmu_radix_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen)
{
if ((uintptr_t)uaddr + ulen >= VM_MAXUSER_ADDRESS)
@@ -6017,7 +6011,7 @@ int mmu_radix_map_user_ptr(mmu_t mmu, pmap_t pm,
}
int
-mmu_radix_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+mmu_radix_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded)
{
@@ -6028,7 +6022,7 @@ mmu_radix_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
}
static boolean_t
-mmu_radix_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
@@ -6036,7 +6030,7 @@ mmu_radix_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
}
static void
-mmu_radix_scan_init(mmu_t mmup)
+mmu_radix_scan_init()
{
CTR1(KTR_PMAP, "%s()", __func__);
@@ -6044,7 +6038,7 @@ mmu_radix_scan_init(mmu_t mmup)
}
static void
-mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
+mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz,
void **va)
{
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
@@ -6052,7 +6046,7 @@ mmu_radix_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
}
vm_offset_t
-mmu_radix_quick_enter_page(mmu_t mmu, vm_page_t m)
+mmu_radix_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
@@ -6062,7 +6056,7 @@ mmu_radix_quick_enter_page(mmu_t mmu, vm_page_t m)
}
void
-mmu_radix_quick_remove_page(mmu_t mmu, vm_offset_t addr __unused)
+mmu_radix_quick_remove_page(vm_offset_t addr __unused)
{
/* no work to do here */
CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
@@ -6075,7 +6069,7 @@ pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
}
int
-mmu_radix_change_attr(mmu_t mmu, vm_offset_t va, vm_size_t size,
+mmu_radix_change_attr(vm_offset_t va, vm_size_t size,
vm_memattr_t mode)
{
int error;
@@ -6301,7 +6295,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
* attempting to back the vm_pages with domain-local memory.
*/
void
-mmu_radix_page_array_startup(mmu_t mmu, long pages)
+mmu_radix_page_array_startup(long pages)
{
#ifdef notyet
pml2_entry_t *l2e;
@@ -6321,7 +6315,7 @@ mmu_radix_page_array_startup(mmu_t mmu, long pages)
pa = vm_phys_early_alloc(0, end - start);
- start = mmu_radix_map(mmu, &start, pa, end - start, VM_MEMATTR_DEFAULT);
+ start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT);
#ifdef notyet
/* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). */
for (va = start; va < end; va += L3_PAGE_SIZE) {
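Across mmu_radix.c (and the other pmap back ends below) the edit is mechanical: every entry point drops the leading mmu_t cookie because calls no longer go through kobj dispatch but through kernel IFUNCs that bind directly to the selected implementation. The following standalone sketch shows the general GCC/Clang ifunc pattern this relies on; the pmap-like names, the dummy bodies, and the use_radix flag are hypothetical stand-ins, not the kernel's real dispatch code.

```c
/*
 * Standalone illustration of GNU ifunc dispatch (build with GCC or Clang
 * on an ELF target).  All names and bodies here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vm_offset_t;
typedef uint64_t  vm_paddr_t;

static int use_radix = 1;		/* imagine this chosen during early boot */

static vm_paddr_t
mmu_radix_kextract_impl(vm_offset_t va)
{
	return (va | 0x1000);		/* dummy body for the example */
}

static vm_paddr_t
mmu_booke_kextract_impl(vm_offset_t va)
{
	return (va | 0x2000);		/* dummy body for the example */
}

/* The resolver runs once; later calls jump straight to the chosen function. */
static vm_paddr_t (*
pmap_kextract_resolver(void))(vm_offset_t)
{
	return (use_radix ? mmu_radix_kextract_impl : mmu_booke_kextract_impl);
}

vm_paddr_t pmap_kextract(vm_offset_t)
    __attribute__((ifunc("pmap_kextract_resolver")));

int
main(void)
{
	printf("%#lx\n", (unsigned long)pmap_kextract(0x42));
	return (0);
}
```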
diff --git a/sys/powerpc/aim/moea64_if.m b/sys/powerpc/aim/moea64_if.m
deleted file mode 100644
index 738c78b72466..000000000000
--- a/sys/powerpc/aim/moea64_if.m
+++ /dev/null
@@ -1,122 +0,0 @@
-#-
-# Copyright (c) 2010,2015 Nathan Whitehorn
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-# $FreeBSD$
-#
-
-
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/systm.h>
-
-#include <vm/vm.h>
-#include <vm/vm_page.h>
-
-#include <machine/mmuvar.h>
-
-/**
- * MOEA64 kobj methods for 64-bit Book-S page table
- * manipulation routines used, for example, by hypervisors.
- */
-
-INTERFACE moea64;
-SINGLETON;
-
-CODE {
- static moea64_pte_replace_t moea64_pte_replace_default;
-
- static int64_t moea64_pte_replace_default(mmu_t mmu,
- struct pvo_entry *pvo, int flags)
- {
- int64_t refchg;
-
- refchg = MOEA64_PTE_UNSET(mmu, pvo);
- MOEA64_PTE_INSERT(mmu, pvo);
-
- return (refchg);
- }
-}
-
-/**
- * Return ref/changed bits from PTE referenced by _pvo if _pvo is currently in
- * the page table. Returns -1 if _pvo not currently present in the page table.
- */
-METHOD int64_t pte_synch {
- mmu_t _mmu;
- struct pvo_entry *_pvo;
-};
-
-/**
- * Clear bits ptebit (a mask) from the low word of the PTE referenced by
- * _pvo. Return previous values of ref/changed bits or -1 if _pvo is not
- * currently in the page table.
- */
-METHOD int64_t pte_clear {
- mmu_t _mmu;
- struct pvo_entry *_pvo;
- uint64_t _ptebit;
-};
-
-/**
- * Invalidate the PTE referenced by _pvo, returning its ref/changed bits.
- * Returns -1 if PTE not currently present in page table.
- */
-METHOD int64_t pte_unset {
- mmu_t _mmu;
- struct pvo_entry *_pvo;
-};
-
-/**
- * Update the reference PTE to correspond to the contents of _pvo. Has the
- * same ref/changed semantics as pte_unset() (and should clear R/C bits). May
- * change the PVO's location in the page table or return with it unmapped if
- * PVO_WIRED is not set. By default, does unset() followed by insert().
- *
- * _flags is a bitmask describing what level of page invalidation should occur:
- * 0 means no invalidation is required
- * MOEA64_PTE_PROT_UPDATE signifies that the page protection bits are changing
- * MOEA64_PTE_INVALIDATE requires an invalidation of the same strength as
- * pte_unset() followed by pte_insert()
- */
-METHOD int64_t pte_replace {
- mmu_t _mmu;
- struct pvo_entry *_pvo;
- int _flags;
-} DEFAULT moea64_pte_replace_default;
-
-/**
- * Insert a PTE corresponding to _pvo into the page table, returning any errors
- * encountered and (optionally) setting the PVO slot value to some
- * representation of where the entry was placed.
- *
- * Must not replace PTEs marked LPTE_WIRED. If an existing valid PTE is spilled,
- * must synchronize ref/changed bits as in pte_unset().
- */
-METHOD int pte_insert {
- mmu_t _mmu;
- struct pvo_entry *_pvo;
-};
-
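The deleted moea64_if.m declared the Book-S PTE operations as kobj METHODs taking a per-object mmu_t cookie. After this change the same five operations become plain typed function pointers collected in struct moea64_funcs, selected by pointing moea64_ops at a back end's table (see the moea64_native.c hunks below). A minimal sketch, assuming the field layout shown in this diff; the extern and the inline helper re-expressing the old DEFAULT pte_replace are illustrative, not a quote of the real mmu_oea64.h.

```c
/*
 * Sketch of the typed function-pointer table that replaces the kobj
 * interface deleted above.  Field names and argument lists mirror the
 * moea64_native.c hunks below.
 */
#include <stdint.h>

struct pvo_entry;			/* opaque for this sketch */

struct moea64_funcs {
	int64_t	(*pte_synch)(struct pvo_entry *);
	int64_t	(*pte_clear)(struct pvo_entry *, uint64_t);
	int64_t	(*pte_unset)(struct pvo_entry *);
	int64_t	(*pte_replace)(struct pvo_entry *, int);
	int64_t	(*pte_insert)(struct pvo_entry *);
};

extern struct moea64_funcs *moea64_ops;

/* The old DEFAULT method: unset then insert, returning the ref/chg bits. */
static inline int64_t
moea64_pte_replace_default(struct pvo_entry *pvo, int flags)
{
	int64_t refchg;

	(void)flags;			/* the default ignores the hint bits */
	refchg = moea64_ops->pte_unset(pvo);
	moea64_ops->pte_insert(pvo);
	return (refchg);
}
```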
diff --git a/sys/powerpc/aim/moea64_native.c b/sys/powerpc/aim/moea64_native.c
index 24889eaa6ea4..7c5982a7db7a 100644
--- a/sys/powerpc/aim/moea64_native.c
+++ b/sys/powerpc/aim/moea64_native.c
@@ -121,8 +121,6 @@ __FBSDID("$FreeBSD$");
#include <machine/mmuvar.h>
#include "mmu_oea64.h"
-#include "mmu_if.h"
-#include "moea64_if.h"
#define PTESYNC() __asm __volatile("ptesync");
#define TLBSYNC() __asm __volatile("tlbsync; ptesync");
@@ -215,46 +213,56 @@ static volatile struct pate *moea64_part_table;
/*
* Dump function.
*/
-static void *moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf,
+static void *moea64_dump_pmap_native(void *ctx, void *buf,
u_long *nbytes);
/*
* PTE calls.
*/
-static int moea64_pte_insert_native(mmu_t, struct pvo_entry *);
-static int64_t moea64_pte_synch_native(mmu_t, struct pvo_entry *);
-static int64_t moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
-static int64_t moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
-static int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);
+static int64_t moea64_pte_insert_native(struct pvo_entry *);
+static int64_t moea64_pte_synch_native(struct pvo_entry *);
+static int64_t moea64_pte_clear_native(struct pvo_entry *, uint64_t);
+static int64_t moea64_pte_replace_native(struct pvo_entry *, int);
+static int64_t moea64_pte_unset_native(struct pvo_entry *);
/*
* Utility routines.
*/
-static void moea64_bootstrap_native(mmu_t mmup,
+static void moea64_bootstrap_native(
vm_offset_t kernelstart, vm_offset_t kernelend);
-static void moea64_cpu_bootstrap_native(mmu_t, int ap);
+static void moea64_cpu_bootstrap_native(int ap);
static void tlbia(void);
+static void moea64_install_native(void);
-static mmu_method_t moea64_native_methods[] = {
- /* Internal interfaces */
- MMUMETHOD(mmu_bootstrap, moea64_bootstrap_native),
- MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap_native),
- MMUMETHOD(mmu_dump_pmap, moea64_dump_pmap_native),
+static struct pmap_funcs moea64_native_methods = {
+ .install = moea64_install_native,
- MMUMETHOD(moea64_pte_synch, moea64_pte_synch_native),
- MMUMETHOD(moea64_pte_clear, moea64_pte_clear_native),
- MMUMETHOD(moea64_pte_unset, moea64_pte_unset_native),
- MMUMETHOD(moea64_pte_replace, moea64_pte_replace_native),
- MMUMETHOD(moea64_pte_insert, moea64_pte_insert_native),
+ /* Internal interfaces */
+ .bootstrap = moea64_bootstrap_native,
+ .cpu_bootstrap = moea64_cpu_bootstrap_native,
+ .dumpsys_dump_pmap = moea64_dump_pmap_native,
+};
- { 0, 0 }
+static struct moea64_funcs moea64_native_funcs = {
+ .pte_synch = moea64_pte_synch_native,
+ .pte_clear = moea64_pte_clear_native,
+ .pte_unset = moea64_pte_unset_native,
+ .pte_replace = moea64_pte_replace_native,
+ .pte_insert = moea64_pte_insert_native,
};
-MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
- 0, oea64_mmu);
+MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, oea64_mmu);
+
+static void
+moea64_install_native()
+{
+
+ /* Install the MOEA64 ops. */
+ moea64_ops = &moea64_native_funcs;
+}
static int64_t
-moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
+moea64_pte_synch_native(struct pvo_entry *pvo)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
uint64_t ptelo, pvo_ptevpn;
@@ -279,7 +287,7 @@ moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
}
static int64_t
-moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
+moea64_pte_clear_native(struct pvo_entry *pvo, uint64_t ptebit)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
struct lpte properpt;
@@ -317,15 +325,15 @@ moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
critical_exit();
} else {
rw_runlock(&moea64_eviction_lock);
- ptelo = moea64_pte_unset_native(mmu, pvo);
- moea64_pte_insert_native(mmu, pvo);
+ ptelo = moea64_pte_unset_native(pvo);
+ moea64_pte_insert_native(pvo);
}
return (ptelo & (LPTE_REF | LPTE_CHG));
}
static int64_t
-moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
+moea64_pte_unset_native(struct pvo_entry *pvo)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
uint64_t ptelo, pvo_ptevpn;
@@ -361,7 +369,7 @@ moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
}
static int64_t
-moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
+moea64_pte_replace_inval_native(struct pvo_entry *pvo,
volatile struct lpte *pt)
{
struct lpte properpt;
@@ -400,7 +408,7 @@ moea64_pte_replace_inval_native(mmu_t mmu, struct pvo_entry *pvo,
}
static int64_t
-moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
+moea64_pte_replace_native(struct pvo_entry *pvo, int flags)
{
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
struct lpte properpt;
@@ -421,14 +429,14 @@ moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
rw_runlock(&moea64_eviction_lock);
} else {
/* Otherwise, need reinsertion and deletion */
- ptelo = moea64_pte_replace_inval_native(mmu, pvo, pt);
+ ptelo = moea64_pte_replace_inval_native(pvo, pt);
}
return (ptelo);
}
static void
-moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
+moea64_cpu_bootstrap_native(int ap)
{
int i = 0;
#ifdef __powerpc64__
@@ -485,15 +493,14 @@ moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
}
static void
-moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
- vm_offset_t kernelend)
+moea64_bootstrap_native(vm_offset_t kernelstart, vm_offset_t kernelend)
{
vm_size_t size;
vm_offset_t off;
vm_paddr_t pa;
register_t msr;
- moea64_early_bootstrap(mmup, kernelstart, kernelend);
+ moea64_early_bootstrap(kernelstart, kernelend);
switch (mfpvr() >> 16) {
case IBMPOWER9:
@@ -557,7 +564,7 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
- moea64_mid_bootstrap(mmup, kernelstart, kernelend);
+ moea64_mid_bootstrap(kernelstart, kernelend);
/*
* Add a mapping for the page table itself if there is no direct map.
@@ -572,7 +579,7 @@ moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
}
/* Bring up virtual memory */
- moea64_late_bootstrap(mmup, kernelstart, kernelend);
+ moea64_late_bootstrap(kernelstart, kernelend);
}
static void
@@ -715,8 +722,8 @@ moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
return (k);
}
-static int
-moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
+static int64_t
+moea64_pte_insert_native(struct pvo_entry *pvo)
{
struct lpte insertpt;
uintptr_t slot;
@@ -790,7 +797,7 @@ moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
}
static void *
-moea64_dump_pmap_native(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
+moea64_dump_pmap_native(void *ctx, void *buf, u_long *nbytes)
{
struct dump_context *dctx;
u_long ptex, ptex_end;
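moea64_native now advertises only its bootstrap hooks plus an .install method whose sole job is to point moea64_ops at the native PTE table during the "post-install, pre-bootstrap" step, before the IFUNC-backed wrappers are resolved. A hedged sketch of how another 64-bit Book-S back end (a hypervisor one, say) would plug in under the same scheme; every foo_* symbol is made up and the function bodies are elided, while the struct layouts, headers, and MMU_DEF_INHERIT usage are taken from the hunks above.

```c
/*
 * Sketch only: a hypothetical "foo" back end following the pattern of
 * moea64_native.c above.  Definitions of the foo_pte_* functions are
 * elided; the headers are assumed to provide the declarations used here.
 */
#include <machine/mmuvar.h>
#include "mmu_oea64.h"

int64_t foo_pte_synch(struct pvo_entry *);
int64_t foo_pte_clear(struct pvo_entry *, uint64_t);
int64_t foo_pte_unset(struct pvo_entry *);
int64_t foo_pte_replace(struct pvo_entry *, int);
int64_t foo_pte_insert(struct pvo_entry *);

static struct moea64_funcs foo_moea64_funcs = {
	.pte_synch   = foo_pte_synch,
	.pte_clear   = foo_pte_clear,
	.pte_unset   = foo_pte_unset,
	.pte_replace = foo_pte_replace,
	.pte_insert  = foo_pte_insert,
};

static void
foo_install(void)
{
	/* Runs from the install hook, before the pmap IFUNCs are resolved. */
	moea64_ops = &foo_moea64_funcs;
}

static struct pmap_funcs foo_methods = {
	.install = foo_install,
	/* platform bootstrap/cpu_bootstrap overrides would also go here */
};

/*
 * Inherit everything else from the generic oea64 pmap, as the native code
 * above does; the name string is a placeholder for an MMU_TYPE_* constant.
 */
MMU_DEF_INHERIT(foo_mmu, "mmu_foo", foo_methods, oea64_mmu);
```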
diff --git a/sys/powerpc/booke/booke_machdep.c b/sys/powerpc/booke/booke_machdep.c
index 6def6c6d681b..f1426d06ce13 100644
--- a/sys/powerpc/booke/booke_machdep.c
+++ b/sys/powerpc/booke/booke_machdep.c
@@ -115,6 +115,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 0aea308e88fa..1ff0ad83f801 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -125,8 +125,6 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
-#include "mmu_if.h"
-
#define SPARSE_MAPDEV
/* Use power-of-two mappings in mmu_booke_mapdev(), to save entries. */
@@ -182,7 +180,7 @@ static struct mtx tlbivax_mutex;
/* PMAP */
/**************************************************************************/
-static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+static int mmu_booke_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);
unsigned int kptbl_min; /* Index of the first kernel ptbl. */
@@ -263,10 +261,10 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
#define PMAP_SHPGPERPROC 200
#endif
-static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
-static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
-static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
-static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
+static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
+static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
+static int pte_remove(pmap_t, vm_offset_t, uint8_t);
+static pte_t *pte_find(pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
static pv_entry_t pv_alloc(void);
@@ -287,143 +285,141 @@ void pmap_bootstrap_ap(volatile uint32_t *);
/*
* Kernel MMU interface
*/
-static void mmu_booke_clear_modify(mmu_t, vm_page_t);
-static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
+static void mmu_booke_clear_modify(vm_page_t);
+static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t,
vm_size_t, vm_offset_t);
-static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
-static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
+static void mmu_booke_copy_page(vm_page_t, vm_page_t);
+static void mmu_booke_copy_pages(vm_page_t *,
vm_offset_t, vm_page_t *, vm_offset_t, int);
-static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+static int mmu_booke_enter(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);
-static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+static void mmu_booke_enter_object(pmap_t, vm_offset_t, vm_offset_t,
vm_page_t, vm_prot_t);
-static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+static void mmu_booke_enter_quick(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t);
-static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
-static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
+static vm_paddr_t mmu_booke_extract(pmap_t, vm_offset_t);
+static vm_page_t mmu_booke_extract_and_hold(pmap_t, vm_offset_t,
vm_prot_t);
-static void mmu_booke_init(mmu_t);
-static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
-static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
-static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
-static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
+static void mmu_booke_init(void);
+static boolean_t mmu_booke_is_modified(vm_page_t);
+static boolean_t mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
+static boolean_t mmu_booke_is_referenced(vm_page_t);
+static int mmu_booke_ts_referenced(vm_page_t);
+static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
int);
-static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
+static int mmu_booke_mincore(pmap_t, vm_offset_t,
vm_paddr_t *);
-static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
+static void mmu_booke_object_init_pt(pmap_t, vm_offset_t,
vm_object_t, vm_pindex_t, vm_size_t);
-static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-static void mmu_booke_page_init(mmu_t, vm_page_t);
-static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
-static void mmu_booke_pinit(mmu_t, pmap_t);
-static void mmu_booke_pinit0(mmu_t, pmap_t);
-static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+static boolean_t mmu_booke_page_exists_quick(pmap_t, vm_page_t);
+static void mmu_booke_page_init(vm_page_t);
+static int mmu_booke_page_wired_mappings(vm_page_t);
+static int mmu_booke_pinit(pmap_t);
+static void mmu_booke_pinit0(pmap_t);
+static void mmu_booke_protect(pmap_t, vm_offset_t, vm_offset_t,
vm_prot_t);
-static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
-static void mmu_booke_release(mmu_t, pmap_t);
-static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-static void mmu_booke_remove_all(mmu_t, vm_page_t);
-static void mmu_booke_remove_write(mmu_t, vm_page_t);
-static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-static void mmu_booke_zero_page(mmu_t, vm_page_t);
-static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-static void mmu_booke_activate(mmu_t, struct thread *);
-static void mmu_booke_deactivate(mmu_t, struct thread *);
-static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
-static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
-static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
-static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
-static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
-static void mmu_booke_kremove(mmu_t, vm_offset_t);
-static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
-static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
+static void mmu_booke_qenter(vm_offset_t, vm_page_t *, int);
+static void mmu_booke_qremove(vm_offset_t, int);
+static void mmu_booke_release(pmap_t);
+static void mmu_booke_remove(pmap_t, vm_offset_t, vm_offset_t);
+static void mmu_booke_remove_all(vm_page_t);
+static void mmu_booke_remove_write(vm_page_t);
+static void mmu_booke_unwire(pmap_t, vm_offset_t, vm_offset_t);
+static void mmu_booke_zero_page(vm_page_t);
+static void mmu_booke_zero_page_area(vm_page_t, int, int);
+static void mmu_booke_activate(struct thread *);
+static void mmu_booke_deactivate(struct thread *);
+static void mmu_booke_bootstrap(vm_offset_t, vm_offset_t);
+static void *mmu_booke_mapdev(vm_paddr_t, vm_size_t);
+static void *mmu_booke_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
+static void mmu_booke_unmapdev(vm_offset_t, vm_size_t);
+static vm_paddr_t mmu_booke_kextract(vm_offset_t);
+static void mmu_booke_kenter(vm_offset_t, vm_paddr_t);
+static void mmu_booke_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
+static void mmu_booke_kremove(vm_offset_t);
+static boolean_t mmu_booke_dev_direct_mapped(vm_paddr_t, vm_size_t);
+static void mmu_booke_sync_icache(pmap_t, vm_offset_t,
vm_size_t);
-static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
+static void mmu_booke_dumpsys_map(vm_paddr_t pa, size_t,
void **);
-static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
+static void mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t,
void *);
-static void mmu_booke_scan_init(mmu_t);
-static vm_offset_t mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
-static void mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
-static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
+static void mmu_booke_scan_init(void);
+static vm_offset_t mmu_booke_quick_enter_page(vm_page_t m);
+static void mmu_booke_quick_remove_page(vm_offset_t addr);
+static int mmu_booke_change_attr(vm_offset_t addr,
vm_size_t sz, vm_memattr_t mode);
-static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
+static int mmu_booke_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
-static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+static int mmu_booke_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
-static void mmu_booke_page_array_startup(mmu_t , long);
-static boolean_t mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m);
+static void mmu_booke_page_array_startup(long);
+static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
-static mmu_method_t mmu_booke_methods[] = {
+static struct pmap_funcs mmu_booke_methods = {
/* pmap dispatcher interface */
- MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
- MMUMETHOD(mmu_copy, mmu_booke_copy),
- MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
- MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
- MMUMETHOD(mmu_enter, mmu_booke_enter),
- MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
- MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
- MMUMETHOD(mmu_extract, mmu_booke_extract),
- MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
- MMUMETHOD(mmu_init, mmu_booke_init),
- MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
- MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
- MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
- MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
- MMUMETHOD(mmu_map, mmu_booke_map),
- MMUMETHOD(mmu_mincore, mmu_booke_mincore),
- MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
- MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
- MMUMETHOD(mmu_page_init, mmu_booke_page_init),
- MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
- MMUMETHOD(mmu_pinit, mmu_booke_pinit),
- MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
- MMUMETHOD(mmu_protect, mmu_booke_protect),
- MMUMETHOD(mmu_qenter, mmu_booke_qenter),
- MMUMETHOD(mmu_qremove, mmu_booke_qremove),
- MMUMETHOD(mmu_release, mmu_booke_release),
- MMUMETHOD(mmu_remove, mmu_booke_remove),
- MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
- MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
- MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
- MMUMETHOD(mmu_unwire, mmu_booke_unwire),
- MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
- MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
- MMUMETHOD(mmu_activate, mmu_booke_activate),
- MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
- MMUMETHOD(mmu_quick_enter_page, mmu_booke_quick_enter_page),
- MMUMETHOD(mmu_quick_remove_page, mmu_booke_quick_remove_page),
- MMUMETHOD(mmu_page_array_startup, mmu_booke_page_array_startup),
- MMUMETHOD(mmu_page_is_mapped, mmu_booke_page_is_mapped),
+ .clear_modify = mmu_booke_clear_modify,
+ .copy = mmu_booke_copy,
+ .copy_page = mmu_booke_copy_page,
+ .copy_pages = mmu_booke_copy_pages,
+ .enter = mmu_booke_enter,
+ .enter_object = mmu_booke_enter_object,
+ .enter_quick = mmu_booke_enter_quick,
+ .extract = mmu_booke_extract,
+ .extract_and_hold = mmu_booke_extract_and_hold,
+ .init = mmu_booke_init,
+ .is_modified = mmu_booke_is_modified,
+ .is_prefaultable = mmu_booke_is_prefaultable,
+ .is_referenced = mmu_booke_is_referenced,
+ .ts_referenced = mmu_booke_ts_referenced,
+ .map = mmu_booke_map,
+ .mincore = mmu_booke_mincore,
+ .object_init_pt = mmu_booke_object_init_pt,
+ .page_exists_quick = mmu_booke_page_exists_quick,
+ .page_init = mmu_booke_page_init,
+ .page_wired_mappings = mmu_booke_page_wired_mappings,
+ .pinit = mmu_booke_pinit,
+ .pinit0 = mmu_booke_pinit0,
+ .protect = mmu_booke_protect,
+ .qenter = mmu_booke_qenter,
+ .qremove = mmu_booke_qremove,
+ .release = mmu_booke_release,
+ .remove = mmu_booke_remove,
+ .remove_all = mmu_booke_remove_all,
+ .remove_write = mmu_booke_remove_write,
+ .sync_icache = mmu_booke_sync_icache,
+ .unwire = mmu_booke_unwire,
+ .zero_page = mmu_booke_zero_page,
+ .zero_page_area = mmu_booke_zero_page_area,
+ .activate = mmu_booke_activate,
+ .deactivate = mmu_booke_deactivate,
+ .quick_enter_page = mmu_booke_quick_enter_page,
+ .quick_remove_page = mmu_booke_quick_remove_page,
+ .page_array_startup = mmu_booke_page_array_startup,
+ .page_is_mapped = mmu_booke_page_is_mapped,
/* Internal interfaces */
- MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
- MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
- MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
- MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
- MMUMETHOD(mmu_kenter, mmu_booke_kenter),
- MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
- MMUMETHOD(mmu_kextract, mmu_booke_kextract),
- MMUMETHOD(mmu_kremove, mmu_booke_kremove),
- MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
- MMUMETHOD(mmu_change_attr, mmu_booke_change_attr),
- MMUMETHOD(mmu_map_user_ptr, mmu_booke_map_user_ptr),
- MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
+ .bootstrap = mmu_booke_bootstrap,
+ .dev_direct_mapped = mmu_booke_dev_direct_mapped,
+ .mapdev = mmu_booke_mapdev,
+ .mapdev_attr = mmu_booke_mapdev_attr,
+ .kenter = mmu_booke_kenter,
+ .kenter_attr = mmu_booke_kenter_attr,
+ .kextract = mmu_booke_kextract,
+ .kremove = mmu_booke_kremove,
+ .unmapdev = mmu_booke_unmapdev,
+ .change_attr = mmu_booke_change_attr,
+ .map_user_ptr = mmu_booke_map_user_ptr,
+ .decode_kernel_ptr = mmu_booke_decode_kernel_ptr,
/* dumpsys() support */
- MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
- MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
- MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
-
- { 0, 0 }
+ .dumpsys_map_chunk = mmu_booke_dumpsys_map,
+ .dumpsys_unmap_chunk = mmu_booke_dumpsys_unmap,
+ .dumpsys_pa_init = mmu_booke_scan_init,
};
-MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
+MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods);
#ifdef __powerpc64__
#include "pmap_64.c"
@@ -632,7 +628,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
* This is called during booke_init, before the system is really initialized.
*/
static void
-mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
+mmu_booke_bootstrap(vm_offset_t start, vm_offset_t kernelend)
{
vm_paddr_t phys_kernelend;
struct mem_region *mp, *mp1;
@@ -940,7 +936,7 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
for (i = 0; i < kstack_pages; i++) {
- mmu_booke_kenter(mmu, kstack0, kstack0_phys);
+ mmu_booke_kenter(kstack0, kstack0_phys);
kstack0 += PAGE_SIZE;
kstack0_phys += PAGE_SIZE;
}
@@ -1012,12 +1008,12 @@ SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, booke_pmap_init_qpages, NULL);
* Get the physical page address for the given pmap/virtual address.
*/
static vm_paddr_t
-mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+mmu_booke_extract(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa;
PMAP_LOCK(pmap);
- pa = pte_vatopa(mmu, pmap, va);
+ pa = pte_vatopa(pmap, va);
PMAP_UNLOCK(pmap);
return (pa);
@@ -1028,7 +1024,7 @@ mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
* kernel virtual address.
*/
static vm_paddr_t
-mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
+mmu_booke_kextract(vm_offset_t va)
{
tlb_entry_t e;
vm_paddr_t p = 0;
@@ -1040,7 +1036,7 @@ mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
#endif
if (va >= VM_MIN_KERNEL_ADDRESS && va <= VM_MAX_KERNEL_ADDRESS)
- p = pte_vatopa(mmu, kernel_pmap, va);
+ p = pte_vatopa(kernel_pmap, va);
if (p == 0) {
/* Check TLB1 mappings */
@@ -1062,7 +1058,7 @@ mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
* system needs to map virtual memory.
*/
static void
-mmu_booke_init(mmu_t mmu)
+mmu_booke_init()
{
int shpgperproc = PMAP_SHPGPERPROC;
@@ -1099,13 +1095,13 @@ mmu_booke_init(mmu_t mmu)
* references recorded. Existing mappings in the region are overwritten.
*/
static void
-mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
+mmu_booke_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
va = sva;
while (count-- > 0) {
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
+ mmu_booke_kenter(va, VM_PAGE_TO_PHYS(*m));
va += PAGE_SIZE;
m++;
}
@@ -1116,13 +1112,13 @@ mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
* temporary mappings entered by mmu_booke_qenter.
*/
static void
-mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
+mmu_booke_qremove(vm_offset_t sva, int count)
{
vm_offset_t va;
va = sva;
while (count-- > 0) {
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kremove(va);
va += PAGE_SIZE;
}
}
@@ -1131,14 +1127,14 @@ mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
* Map a wired page into kernel virtual address space.
*/
static void
-mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
+mmu_booke_kenter(vm_offset_t va, vm_paddr_t pa)
{
- mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
+ mmu_booke_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}
static void
-mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
+mmu_booke_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
uint32_t flags;
pte_t *pte;
@@ -1150,7 +1146,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
flags |= tlb_calc_wimg(pa, ma) << PTE_MAS2_SHIFT;
flags |= PTE_PS_4KB;
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
mtx_lock_spin(&tlbivax_mutex);
@@ -1182,7 +1178,7 @@ mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
* Remove a page from kernel page table.
*/
static void
-mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
+mmu_booke_kremove(vm_offset_t va)
{
pte_t *pte;
@@ -1192,7 +1188,7 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
(va <= VM_MAX_KERNEL_ADDRESS)),
("mmu_booke_kremove: invalid va"));
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
if (!PTE_ISVALID(pte)) {
@@ -1218,7 +1214,7 @@ mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
* called in this thread. This is used internally in copyin/copyout.
*/
int
-mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
+mmu_booke_map_user_ptr(pmap_t pm, volatile const void *uaddr,
void **kaddr, size_t ulen, size_t *klen)
{
@@ -1238,7 +1234,7 @@ mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
* address space.
*/
static int
-mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+mmu_booke_decode_kernel_ptr(vm_offset_t addr, int *is_user,
vm_offset_t *decoded_addr)
{
@@ -1252,7 +1248,7 @@ mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
}
static boolean_t
-mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m)
+mmu_booke_page_is_mapped(vm_page_t m)
{
return (!TAILQ_EMPTY(&(m)->md.pv_list));
@@ -1262,11 +1258,11 @@ mmu_booke_page_is_mapped(mmu_t mmu, vm_page_t m)
* Initialize pmap associated with process 0.
*/
static void
-mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
+mmu_booke_pinit0(pmap_t pmap)
{
PMAP_LOCK_INIT(pmap);
- mmu_booke_pinit(mmu, pmap);
+ mmu_booke_pinit(pmap);
PCPU_SET(curpmap, pmap);
}
@@ -1276,21 +1272,21 @@ mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
* will be wired down.
*/
static int
-mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_booke_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind)
{
int error;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
+ error = mmu_booke_enter_locked(pmap, va, m, prot, flags, psind);
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
return (error);
}
static int
-mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_booke_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
{
pte_t *pte;
@@ -1328,7 +1324,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* If there is an existing mapping, and the physical address has not
* changed, must be protection or wiring change.
*/
- if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
+ if (((pte = pte_find(pmap, va)) != NULL) &&
(PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
/*
@@ -1439,7 +1435,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
flags |= PTE_WIRED;
- error = pte_enter(mmu, pmap, m, va, flags,
+ error = pte_enter(pmap, m, va, flags,
(pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
if (error != 0)
return (KERN_RESOURCE_SHORTAGE);
@@ -1473,7 +1469,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* corresponding offset from m_start are mapped.
*/
static void
-mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
+mmu_booke_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
vm_page_t m;
@@ -1486,7 +1482,7 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
- mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
+ mmu_booke_enter_locked(pmap, start + ptoa(diff), m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
m = TAILQ_NEXT(m, listq);
@@ -1496,13 +1492,13 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
}
static void
-mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+mmu_booke_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
- mmu_booke_enter_locked(mmu, pmap, va, m,
+ mmu_booke_enter_locked(pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
PMAP_ENTER_QUICK_LOCKED, 0);
PMAP_UNLOCK(pmap);
@@ -1515,7 +1511,7 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
* It is assumed that the start and end are properly rounded to the page size.
*/
static void
-mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
+mmu_booke_remove(pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
pte_t *pte;
uint8_t hold_flag;
@@ -1545,12 +1541,12 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
for (; va < endva; va += PAGE_SIZE) {
- pte = pte_find_next(mmu, pmap, &va);
+ pte = pte_find_next(pmap, &va);
if ((pte == NULL) || !PTE_ISVALID(pte))
break;
if (va >= endva)
break;
- pte_remove(mmu, pmap, va, hold_flag);
+ pte_remove(pmap, va, hold_flag);
}
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
@@ -1562,7 +1558,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
* Remove physical page from all pmaps in which it resides.
*/
static void
-mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
+mmu_booke_remove_all(vm_page_t m)
{
pv_entry_t pv, pvn;
uint8_t hold_flag;
@@ -1571,7 +1567,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_link, pvn) {
PMAP_LOCK(pv->pv_pmap);
hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
- pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
+ pte_remove(pv->pv_pmap, pv->pv_va, hold_flag);
PMAP_UNLOCK(pv->pv_pmap);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -1582,7 +1578,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
* Map a range of physical addresses into kernel virtual address space.
*/
static vm_offset_t
-mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
+mmu_booke_map(vm_offset_t *virt, vm_paddr_t pa_start,
vm_paddr_t pa_end, int prot)
{
vm_offset_t sva = *virt;
@@ -1595,7 +1591,7 @@ mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
#endif
while (pa_start < pa_end) {
- mmu_booke_kenter(mmu, va, pa_start);
+ mmu_booke_kenter(va, pa_start);
va += PAGE_SIZE;
pa_start += PAGE_SIZE;
}
@@ -1609,7 +1605,7 @@ mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
* way.
*/
static void
-mmu_booke_activate(mmu_t mmu, struct thread *td)
+mmu_booke_activate(struct thread *td)
{
pmap_t pmap;
u_int cpuid;
@@ -1646,7 +1642,7 @@ mmu_booke_activate(mmu_t mmu, struct thread *td)
* Deactivate the specified process's address space.
*/
static void
-mmu_booke_deactivate(mmu_t mmu, struct thread *td)
+mmu_booke_deactivate(struct thread *td)
{
pmap_t pmap;
@@ -1669,7 +1665,7 @@ mmu_booke_deactivate(mmu_t mmu, struct thread *td)
* This routine is only advisory and need not do anything.
*/
static void
-mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
+mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap,
vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{
@@ -1679,7 +1675,7 @@ mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
* Set the physical protection on the specified range of this map as requested.
*/
static void
-mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+mmu_booke_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
vm_prot_t prot)
{
vm_offset_t va;
@@ -1687,7 +1683,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pte_t *pte;
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
- mmu_booke_remove(mmu, pmap, sva, eva);
+ mmu_booke_remove(pmap, sva, eva);
return;
}
@@ -1696,7 +1692,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
PMAP_LOCK(pmap);
for (va = sva; va < eva; va += PAGE_SIZE) {
- if ((pte = pte_find(mmu, pmap, va)) != NULL) {
+ if ((pte = pte_find(pmap, va)) != NULL) {
if (PTE_ISVALID(pte)) {
m = PHYS_TO_VM_PAGE(PTE_PA(pte));
@@ -1722,7 +1718,7 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
* Clear the write and modified bits in each of the given page's mappings.
*/
static void
-mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
+mmu_booke_remove_write(vm_page_t m)
{
pv_entry_t pv;
pte_t *pte;
@@ -1736,7 +1732,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL) {
if (PTE_ISVALID(pte)) {
m = PHYS_TO_VM_PAGE(PTE_PA(pte));
@@ -1766,7 +1762,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
* protection.
*/
static vm_page_t
-mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
+mmu_booke_extract_and_hold(pmap_t pmap, vm_offset_t va,
vm_prot_t prot)
{
pte_t *pte;
@@ -1775,7 +1771,7 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
m = NULL;
PMAP_LOCK(pmap);
- pte = pte_find(mmu, pmap, va);
+ pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte)) {
if (pmap == kernel_pmap)
pte_wbit = PTE_SW;
@@ -1796,7 +1792,7 @@ mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
* Initialize a vm_page's machine-dependent fields.
*/
static void
-mmu_booke_page_init(mmu_t mmu, vm_page_t m)
+mmu_booke_page_init(vm_page_t m)
{
m->md.pv_tracked = 0;
@@ -1808,7 +1804,7 @@ mmu_booke_page_init(mmu_t mmu, vm_page_t m)
* in any of physical maps.
*/
static boolean_t
-mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
+mmu_booke_is_modified(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@@ -1827,7 +1823,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISMODIFIED(pte))
rv = TRUE;
@@ -1845,7 +1841,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
* for prefault.
*/
static boolean_t
-mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
+mmu_booke_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
return (FALSE);
@@ -1856,7 +1852,7 @@ mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
* in any physical maps.
*/
static boolean_t
-mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
+mmu_booke_is_referenced(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@@ -1868,7 +1864,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISREFERENCED(pte))
rv = TRUE;
@@ -1885,7 +1881,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
* Clear the modify bits on the specified physical page.
*/
static void
-mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
+mmu_booke_clear_modify(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@@ -1900,7 +1896,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@@ -1934,7 +1930,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
* to pmap_is_modified().
*/
static int
-mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
+mmu_booke_ts_referenced(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
@@ -1946,7 +1942,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISMODIFIED(pte))
vm_page_dirty(m);
@@ -1982,14 +1978,14 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
* there is no need to invalidate any TLB entries.
*/
static void
-mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+mmu_booke_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
vm_offset_t va;
pte_t *pte;
PMAP_LOCK(pmap);
for (va = sva; va < eva; va += PAGE_SIZE) {
- if ((pte = pte_find(mmu, pmap, va)) != NULL &&
+ if ((pte = pte_find(pmap, va)) != NULL &&
PTE_ISVALID(pte)) {
if (!PTE_ISWIRED(pte))
panic("mmu_booke_unwire: pte %p isn't wired",
@@ -2009,7 +2005,7 @@ mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
* page aging.
*/
static boolean_t
-mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
+mmu_booke_page_exists_quick(pmap_t pmap, vm_page_t m)
{
pv_entry_t pv;
int loops;
@@ -2037,7 +2033,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
* wired.
*/
static int
-mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
+mmu_booke_page_wired_mappings(vm_page_t m)
{
pv_entry_t pv;
pte_t *pte;
@@ -2048,7 +2044,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
- if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
+ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL)
if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
count++;
PMAP_UNLOCK(pv->pv_pmap);
@@ -2058,7 +2054,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
}
static int
-mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+mmu_booke_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{
int i;
vm_offset_t va;
@@ -2076,7 +2072,7 @@ mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
}
void
-mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
+mmu_booke_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
{
vm_paddr_t ppa;
vm_offset_t ofs;
@@ -2102,7 +2098,7 @@ mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
}
void
-mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
+mmu_booke_dumpsys_unmap(vm_paddr_t pa, size_t sz, void *va)
{
vm_paddr_t ppa;
vm_offset_t ofs;
@@ -2143,7 +2139,7 @@ mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
void
-mmu_booke_scan_init(mmu_t mmu)
+mmu_booke_scan_init()
{
vm_offset_t va;
pte_t *pte;
@@ -2182,7 +2178,7 @@ mmu_booke_scan_init(mmu_t mmu)
va = kmi.buffer_eva;
continue;
}
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
if (pte != NULL && PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
@@ -2195,7 +2191,7 @@ mmu_booke_scan_init(mmu_t mmu)
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
if (pte == NULL || !PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
@@ -2210,10 +2206,10 @@ mmu_booke_scan_init(mmu_t mmu)
* for mapping device memory, NOT real memory.
*/
static void *
-mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
+mmu_booke_mapdev(vm_paddr_t pa, vm_size_t size)
{
- return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
+ return (mmu_booke_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
}
static int
@@ -2232,7 +2228,7 @@ tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e)
}
static void *
-mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
+mmu_booke_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
tlb_entry_t e;
vm_paddr_t tmppa;
@@ -2337,7 +2333,7 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
* 'Unmap' a range mapped by mmu_booke_mapdev().
*/
static void
-mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
+mmu_booke_unmapdev(vm_offset_t va, vm_size_t size)
{
#ifdef SUPPORTS_SHRINKING_TLB1
vm_offset_t base, offset;
@@ -2360,7 +2356,7 @@ mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
* and immediately after an mmap.
*/
static void
-mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
+mmu_booke_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
@@ -2373,8 +2369,7 @@ mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
* Perform the pmap work for mincore.
*/
static int
-mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
- vm_paddr_t *pap)
+mmu_booke_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
{
/* XXX: this should be implemented at some point */
@@ -2382,8 +2377,7 @@ mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
}
static int
-mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
- vm_memattr_t mode)
+mmu_booke_change_attr(vm_offset_t addr, vm_size_t sz, vm_memattr_t mode)
{
vm_offset_t va;
pte_t *pte;
@@ -2440,7 +2434,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
/* Not in TLB1, try through pmap */
/* First validate the range. */
for (va = addr; va < addr + sz; va += PAGE_SIZE) {
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
if (pte == NULL || !PTE_ISVALID(pte))
return (EINVAL);
}
@@ -2448,7 +2442,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
for (va = addr; va < addr + sz; va += PAGE_SIZE) {
- pte = pte_find(mmu, kernel_pmap, va);
+ pte = pte_find(kernel_pmap, va);
*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
tlb0_flush_entry(va);
@@ -2460,7 +2454,7 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
}
static void
-mmu_booke_page_array_startup(mmu_t mmu, long pages)
+mmu_booke_page_array_startup(long pages)
{
vm_page_array_size = pages;
}
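The booke conversion above has the same shape as the AIM ones: the (name, function) mmu_method_t array becomes a designated-initializer struct pmap_funcs, so each slot is a typed, directly callable pointer rather than an entry resolved by kobj at run time. A small standalone illustration of that table style; the names are generic examples, not kernel symbols.

```c
/* Demonstrates a designated-initializer method table with typed pointers. */
#include <stdio.h>

struct demo_funcs {
	int  (*enter)(int);
	void (*remove)(int);
};

static int
demo_enter(int v)
{
	printf("enter %d\n", v);
	return (0);
}

static void
demo_remove(int v)
{
	printf("remove %d\n", v);
}

/* Analogue of "static struct pmap_funcs mmu_booke_methods = { ... };" */
static const struct demo_funcs demo_ops = {
	.enter  = demo_enter,
	.remove = demo_remove,
};

int
main(void)
{
	demo_ops.enter(1);	/* direct, type-checked call: no kobj lookup */
	demo_ops.remove(1);
	return (0);
}
```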
diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c
index 3444cbea014d..1f380060494b 100644
--- a/sys/powerpc/booke/pmap_32.c
+++ b/sys/powerpc/booke/pmap_32.c
@@ -97,8 +97,6 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
-#include "mmu_if.h"
-
#define PRI0ptrX "08x"
/* Reserved KVA space and mutex for mmu_booke_zero_page. */
@@ -132,15 +130,15 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
-static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
-static void ptbl_free(mmu_t, pmap_t, unsigned int);
-static void ptbl_hold(mmu_t, pmap_t, unsigned int);
-static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
+static pte_t *ptbl_alloc(pmap_t, unsigned int, boolean_t);
+static void ptbl_free(pmap_t, unsigned int);
+static void ptbl_hold(pmap_t, unsigned int);
+static int ptbl_unhold(pmap_t, unsigned int);
-static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
-static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
-static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
-static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
+static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
+static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
+static int pte_remove(pmap_t, vm_offset_t, uint8_t);
+static pte_t *pte_find(pmap_t, vm_offset_t);
struct ptbl_buf {
TAILQ_ENTRY(ptbl_buf) link; /* list link */
@@ -240,7 +238,7 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
/* Allocate page table. */
static pte_t *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
+ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
@@ -286,7 +284,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
}
/* Map allocated pages into kernel_pmap. */
- mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
+ mmu_booke_qenter((vm_offset_t)ptbl, mtbl, PTBL_PAGES);
/* Zero whole ptbl. */
bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
@@ -299,7 +297,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
/* Free ptbl pages and invalidate pdir entry. */
static void
-ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
+ptbl_free(pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@@ -333,11 +331,11 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
for (i = 0; i < PTBL_PAGES; i++) {
va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
- pa = pte_vatopa(mmu, kernel_pmap, va);
+ pa = pte_vatopa(kernel_pmap, va);
m = PHYS_TO_VM_PAGE(pa);
vm_page_free_zero(m);
vm_wire_sub(1);
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kremove(va);
}
ptbl_free_pmap_ptbl(pmap, ptbl);
@@ -350,7 +348,7 @@ ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
* Return 1 if ptbl pages were freed.
*/
static int
-ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
+ptbl_unhold(pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@@ -373,7 +371,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
/* decrement hold count */
for (i = 0; i < PTBL_PAGES; i++) {
- pa = pte_vatopa(mmu, kernel_pmap,
+ pa = pte_vatopa(kernel_pmap,
(vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->ref_count--;
@@ -385,7 +383,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
* page.
*/
if (m->ref_count == 0) {
- ptbl_free(mmu, pmap, pdir_idx);
+ ptbl_free(pmap, pdir_idx);
//debugf("ptbl_unhold: e (freed ptbl)\n");
return (1);
@@ -399,7 +397,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
* entry is being inserted into the ptbl.
*/
static void
-ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
+ptbl_hold(pmap_t pmap, unsigned int pdir_idx)
{
vm_paddr_t pa;
pte_t *ptbl;
@@ -419,7 +417,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
for (i = 0; i < PTBL_PAGES; i++) {
- pa = pte_vatopa(mmu, kernel_pmap,
+ pa = pte_vatopa(kernel_pmap,
(vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->ref_count++;
@@ -432,7 +430,7 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
* Return 1 if ptbl pages were freed, otherwise return 0.
*/
static int
-pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
+pte_remove(pmap_t pmap, vm_offset_t va, uint8_t flags)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -492,7 +490,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
if (flags & PTBL_UNHOLD) {
//debugf("pte_remove: e (unhold)\n");
- return (ptbl_unhold(mmu, pmap, pdir_idx));
+ return (ptbl_unhold(pmap, pdir_idx));
}
//debugf("pte_remove: e\n");
@@ -503,7 +501,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
* Insert PTE for a given page and virtual address.
*/
static int
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
boolean_t nosleep)
{
unsigned int pdir_idx = PDIR_IDX(va);
@@ -518,7 +516,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
if (ptbl == NULL) {
/* Allocate page table pages. */
- ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
+ ptbl = ptbl_alloc(pmap, pdir_idx, nosleep);
if (ptbl == NULL) {
KASSERT(nosleep, ("nosleep and NULL ptbl"));
return (ENOMEM);
@@ -532,14 +530,14 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
*/
pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
if (PTE_ISVALID(pte)) {
- pte_remove(mmu, pmap, va, PTBL_HOLD);
+ pte_remove(pmap, va, PTBL_HOLD);
} else {
/*
* pte is not used, increment hold count
* for ptbl pages.
*/
if (pmap != kernel_pmap)
- ptbl_hold(mmu, pmap, pdir_idx);
+ ptbl_hold(pmap, pdir_idx);
}
}
@@ -572,12 +570,12 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
/* Return the pa for the given pmap/va. */
static vm_paddr_t
-pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+pte_vatopa(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa = 0;
pte_t *pte;
- pte = pte_find(mmu, pmap, va);
+ pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
return (pa);
@@ -585,7 +583,7 @@ pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/* Get a pointer to a PTE in a page table. */
static pte_t *
-pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+pte_find(pmap_t pmap, vm_offset_t va)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -600,7 +598,7 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
-pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
+pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
vm_offset_t va;
pte_t **pdir;
@@ -691,8 +689,8 @@ mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
-static void
-mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
+static int
+mmu_booke_pinit(pmap_t pmap)
{
int i;
@@ -708,6 +706,8 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
TAILQ_INIT(&pmap->pm_ptbl_list);
+
+ return (1);
}
/*
@@ -716,7 +716,7 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
* Should only be called if the map contains no valid mappings.
*/
static void
-mmu_booke_release(mmu_t mmu, pmap_t pmap)
+mmu_booke_release(pmap_t pmap)
{
KASSERT(pmap->pm_stats.resident_count == 0,
@@ -726,7 +726,7 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
}
static void
-mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
pte_t *pte;
vm_paddr_t pa = 0;
@@ -741,7 +741,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
while (sz > 0) {
PMAP_LOCK(pm);
- pte = pte_find(mmu, pm, va);
+ pte = pte_find(pm, va);
valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
if (valid)
pa = PTE_PA(pte);
@@ -754,11 +754,11 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
addr = 0;
m = PHYS_TO_VM_PAGE(pa);
PMAP_LOCK(pmap);
- pte_enter(mmu, pmap, m, addr,
+ pte_enter(pmap, m, addr,
PTE_SR | PTE_VALID, FALSE);
addr += (va & PAGE_MASK);
__syncicache((void *)addr, sync_sz);
- pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
+ pte_remove(pmap, addr, PTBL_UNHOLD);
PMAP_UNLOCK(pmap);
} else
__syncicache((void *)va, sync_sz);
@@ -777,7 +777,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
* off and size must reside within a single page.
*/
static void
-mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t va;
@@ -786,9 +786,9 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
mtx_lock(&zero_page_mutex);
va = zero_page_va;
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));
bzero((caddr_t)va + off, size);
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kremove(va);
mtx_unlock(&zero_page_mutex);
}
@@ -797,19 +797,19 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
* mmu_booke_zero_page zeros the specified hardware page.
*/
static void
-mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
+mmu_booke_zero_page(vm_page_t m)
{
vm_offset_t off, va;
va = zero_page_va;
mtx_lock(&zero_page_mutex);
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ mmu_booke_kenter(va, VM_PAGE_TO_PHYS(m));
for (off = 0; off < PAGE_SIZE; off += cacheline_size)
__asm __volatile("dcbz 0,%0" :: "r"(va + off));
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kremove(va);
mtx_unlock(&zero_page_mutex);
}
@@ -820,7 +820,7 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
* one machine dependent page at a time.
*/
static void
-mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
+mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
vm_offset_t sva, dva;
@@ -828,18 +828,18 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
dva = copy_page_dst_va;
mtx_lock(&copy_page_mutex);
- mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
- mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
+ mmu_booke_kenter(sva, VM_PAGE_TO_PHYS(sm));
+ mmu_booke_kenter(dva, VM_PAGE_TO_PHYS(dm));
memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
- mmu_booke_kremove(mmu, dva);
- mmu_booke_kremove(mmu, sva);
+ mmu_booke_kremove(dva);
+ mmu_booke_kremove(sva);
mtx_unlock(&copy_page_mutex);
}
static inline void
-mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@@ -850,17 +850,17 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
- mmu_booke_kenter(mmu, copy_page_src_va,
+ mmu_booke_kenter(copy_page_src_va,
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
a_cp = (char *)copy_page_src_va + a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
- mmu_booke_kenter(mmu, copy_page_dst_va,
+ mmu_booke_kenter(copy_page_dst_va,
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
b_cp = (char *)copy_page_dst_va + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
- mmu_booke_kremove(mmu, copy_page_dst_va);
- mmu_booke_kremove(mmu, copy_page_src_va);
+ mmu_booke_kremove(copy_page_dst_va);
+ mmu_booke_kremove(copy_page_src_va);
a_offset += cnt;
b_offset += cnt;
xfersize -= cnt;
@@ -869,7 +869,7 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}
static vm_offset_t
-mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
+mmu_booke_quick_enter_page(vm_page_t m)
{
vm_paddr_t paddr;
vm_offset_t qaddr;
@@ -885,7 +885,7 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
critical_enter();
qaddr = PCPU_GET(qmap_addr);
- pte = pte_find(mmu, kernel_pmap, qaddr);
+ pte = pte_find(kernel_pmap, qaddr);
KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));
@@ -907,11 +907,11 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
}
static void
-mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+mmu_booke_quick_remove_page(vm_offset_t addr)
{
pte_t *pte;
- pte = pte_find(mmu, kernel_pmap, addr);
+ pte = pte_find(kernel_pmap, addr);
KASSERT(PCPU_GET(qmap_addr) == addr,
("mmu_booke_quick_remove_page: invalid address"));
diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c
index fddc8184a667..35e97e3f9d1b 100644
--- a/sys/powerpc/booke/pmap_64.c
+++ b/sys/powerpc/booke/pmap_64.c
@@ -99,8 +99,6 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
-#include "mmu_if.h"
-
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
@@ -136,16 +134,16 @@ static unsigned long ilog2(unsigned long);
/**************************************************************************/
#define PMAP_ROOT_SIZE (sizeof(pte_t****) * PG_ROOT_NENTRIES)
-static pte_t *ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va,
+static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
bool nosleep, bool *is_new);
-static void ptbl_hold(mmu_t, pmap_t, pte_t *);
-static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
-
-static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
-static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
-static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
-static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
-static pte_t *pte_find_next(mmu_t, pmap_t, vm_offset_t *);
+static void ptbl_hold(pmap_t, pte_t *);
+static int ptbl_unhold(pmap_t, vm_offset_t);
+
+static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
+static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
+static int pte_remove(pmap_t, vm_offset_t, uint8_t);
+static pte_t *pte_find(pmap_t, vm_offset_t);
+static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
/**************************************************************************/
@@ -154,7 +152,7 @@ static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
/* Allocate a page, to be used in a page table. */
static vm_offset_t
-mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep)
+mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
vm_page_t m;
int req;
@@ -173,7 +171,7 @@ mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep)
if (!(m->flags & PG_ZERO))
/* Zero whole ptbl. */
- mmu_booke_zero_page(mmu, m);
+ mmu_booke_zero_page(m);
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
@@ -186,7 +184,7 @@ ptbl_init(void)
/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
-pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+pte_find(pmap_t pmap, vm_offset_t va)
{
pte_t ***pdir_l1;
pte_t **pdir;
@@ -207,7 +205,7 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
-pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
+pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
vm_offset_t va;
pte_t ****pm_root;
@@ -250,7 +248,7 @@ pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
}
static bool
-unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
+unhold_free_page(pmap_t pmap, vm_page_t m)
{
m->ref_count--;
@@ -264,7 +262,7 @@ unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
}
static vm_offset_t
-alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
+alloc_or_hold_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
bool nosleep, bool hold, bool *isnew)
{
vm_offset_t page;
@@ -274,7 +272,7 @@ alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
KASSERT(page != 0 || pmap != kernel_pmap,
("NULL page table page found in kernel pmap!"));
if (page == 0) {
- page = mmu_booke_alloc_page(mmu, pmap, index, nosleep);
+ page = mmu_booke_alloc_page(pmap, index, nosleep);
if (ptr_tbl[index] == 0) {
*isnew = true;
ptr_tbl[index] = page;
@@ -297,7 +295,7 @@ alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
/* Allocate page table. */
static pte_t*
-ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
+ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
unsigned int pg_root_idx = PG_ROOT_IDX(va);
unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
@@ -306,15 +304,15 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
bool hold_page;
hold_page = (pmap != kernel_pmap);
- pdir_l1 = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pmap->pm_root,
+ pdir_l1 = alloc_or_hold_page(pmap, (vm_offset_t *)pmap->pm_root,
pg_root_idx, nosleep, hold_page, is_new);
if (pdir_l1 == 0)
return (NULL);
- pdir = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
+ pdir = alloc_or_hold_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
nosleep, hold_page, is_new);
if (pdir == 0)
return (NULL);
- ptbl = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir, pdir_idx,
+ ptbl = alloc_or_hold_page(pmap, (vm_offset_t *)pdir, pdir_idx,
nosleep, false, is_new);
return ((pte_t *)ptbl);
@@ -327,7 +325,7 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
* Return 1 if ptbl pages were freed.
*/
static int
-ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
pte_t *ptbl;
vm_page_t m;
@@ -351,19 +349,19 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
/* decrement hold count */
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
- if (!unhold_free_page(mmu, pmap, m))
+ if (!unhold_free_page(pmap, m))
return (0);
pdir[pdir_idx] = NULL;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));
- if (!unhold_free_page(mmu, pmap, m))
+ if (!unhold_free_page(pmap, m))
return (1);
pdir_l1[pdir_l1_idx] = NULL;
m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));
- if (!unhold_free_page(mmu, pmap, m))
+ if (!unhold_free_page(pmap, m))
return (1);
pmap->pm_root[pg_root_idx] = NULL;
@@ -375,7 +373,7 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
* entry is being inserted into ptbl.
*/
static void
-ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl)
+ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
vm_page_t m;
@@ -392,12 +390,12 @@ ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl)
* Return 1 if ptbl pages were freed, otherwise return 0.
*/
static int
-pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
+pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
vm_page_t m;
pte_t *pte;
- pte = pte_find(mmu, pmap, va);
+ pte = pte_find(pmap, va);
KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
__func__, (uintmax_t)va, pmap));
@@ -440,7 +438,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
pmap->pm_stats.resident_count--;
if (flags & PTBL_UNHOLD) {
- return (ptbl_unhold(mmu, pmap, va));
+ return (ptbl_unhold(pmap, va));
}
return (0);
}
@@ -449,7 +447,7 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
* Insert PTE for a given page and virtual address.
*/
static int
-pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
boolean_t nosleep)
{
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -457,7 +455,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
bool is_new;
/* Get the page directory pointer. */
- ptbl = ptbl_alloc(mmu, pmap, va, nosleep, &is_new);
+ ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
if (ptbl == NULL) {
KASSERT(nosleep, ("nosleep and NULL ptbl"));
return (ENOMEM);
@@ -471,14 +469,14 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
*/
pte = &ptbl[ptbl_idx];
if (PTE_ISVALID(pte)) {
- pte_remove(mmu, pmap, va, PTBL_HOLD);
+ pte_remove(pmap, va, PTBL_HOLD);
} else {
/*
* pte is not used, increment hold count for ptbl
* pages.
*/
if (pmap != kernel_pmap)
- ptbl_hold(mmu, pmap, ptbl);
+ ptbl_hold(pmap, ptbl);
}
}
@@ -512,12 +510,12 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
/* Return the pa for the given pmap/va. */
static vm_paddr_t
-pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+pte_vatopa(pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa = 0;
pte_t *pte;
- pte = pte_find(mmu, pmap, va);
+ pte = pte_find(pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
return (pa);
@@ -599,8 +597,8 @@ mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
-static void
-mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
+static int
+mmu_booke_pinit(pmap_t pmap)
{
int i;
@@ -615,6 +613,8 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);
+
+ return (1);
}
/*
@@ -623,7 +623,7 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
* Should only be called if the map contains no valid mappings.
*/
static void
-mmu_booke_release(mmu_t mmu, pmap_t pmap)
+mmu_booke_release(pmap_t pmap)
{
KASSERT(pmap->pm_stats.resident_count == 0,
@@ -633,7 +633,7 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
}
static void
-mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
+mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
pte_t *pte;
vm_paddr_t pa = 0;
@@ -641,7 +641,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
while (sz > 0) {
PMAP_LOCK(pm);
- pte = pte_find(mmu, pm, va);
+ pte = pte_find(pm, va);
valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
if (valid)
pa = PTE_PA(pte);
@@ -665,7 +665,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
* off and size must reside within a single page.
*/
static void
-mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
+mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
vm_offset_t va;
@@ -679,7 +679,7 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
* mmu_booke_zero_page zeros the specified hardware page.
*/
static void
-mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
+mmu_booke_zero_page(vm_page_t m)
{
vm_offset_t off, va;
@@ -695,7 +695,7 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
* one machine dependent page at a time.
*/
static void
-mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
+mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
vm_offset_t sva, dva;
@@ -705,7 +705,7 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
}
static inline void
-mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
+mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
void *a_cp, *b_cp;
@@ -733,13 +733,13 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
}
static vm_offset_t
-mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
+mmu_booke_quick_enter_page(vm_page_t m)
{
return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}
static void
-mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
+mmu_booke_quick_remove_page(vm_offset_t addr)
{
}
diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h
index 342848e43a5c..e284e2b64847 100644
--- a/sys/powerpc/include/mmuvar.h
+++ b/sys/powerpc/include/mmuvar.h
@@ -31,84 +31,170 @@
#ifndef _MACHINE_MMUVAR_H_
#define _MACHINE_MMUVAR_H_
-/*
- * A PowerPC MMU implementation is declared with a kernel object and
- * an associated method table. The MMU_DEF macro is used to declare
- * the class, and also links it to the global MMU class list.
- *
- * e.g.
- *
- * static mmu_method_t ppc8xx_methods[] = {
- * MMUMETHOD(mmu_change_wiring, ppc8xx_mmu_change_wiring),
- * MMUMETHOD(mmu_clear_modify, ppc8xx_mmu_clear_modify),
- * MMUMETHOD(mmu_clear_reference, ppc8xx_mmu_clear_reference),
- * ...
- * MMUMETHOD(mmu_dev_direct_mapped, ppc8xx_mmu_dev_direct_mapped),
- * { 0, 0 }
- * };
- *
- * MMU_DEF(ppc8xx, MMU_TYPE_8xx, ppc8xx_methods, sizeof(ppc8xx_mmu_softc));
- *
- * A single level of inheritance is supported in a similar fashion to
- * kobj inheritance e.g.
- *
- * MMU_DEF_1(ppc860c, MMU_TYPE_860c, ppc860c_methods, 0, ppc8xx);
- */
+typedef void (*pmap_bootstrap_t)(vm_offset_t, vm_offset_t);
+typedef void (*pmap_cpu_bootstrap_t)(int);
+typedef void (*pmap_kenter_t)(vm_offset_t, vm_paddr_t pa);
+typedef void (*pmap_kenter_attr_t)(vm_offset_t, vm_paddr_t, vm_memattr_t);
+typedef void (*pmap_kremove_t)(vm_offset_t);
+typedef void *(*pmap_mapdev_t)(vm_paddr_t, vm_size_t);
+typedef void *(*pmap_mapdev_attr_t)(vm_paddr_t, vm_size_t, vm_memattr_t);
+typedef void (*pmap_unmapdev_t)(vm_offset_t, vm_size_t);
+typedef void (*pmap_page_set_memattr_t)(vm_page_t, vm_memattr_t);
+typedef int (*pmap_change_attr_t)(vm_offset_t, vm_size_t, vm_memattr_t);
+typedef int (*pmap_map_user_ptr_t)(pmap_t, volatile const void *,
+ void **, size_t, size_t *);
+typedef int (*pmap_decode_kernel_ptr_t)(vm_offset_t, int *, vm_offset_t *);
+typedef vm_paddr_t (*pmap_kextract_t)(vm_offset_t);
+typedef int (*pmap_dev_direct_mapped_t)(vm_paddr_t, vm_size_t);
-#include <sys/kobj.h>
+typedef void (*pmap_page_array_startup_t)(long);
+typedef void (*pmap_advise_t)(pmap_t, vm_offset_t, vm_offset_t, int);
+typedef void (*pmap_clear_modify_t)(vm_page_t);
+typedef void (*pmap_remove_write_t)(vm_page_t);
+typedef void (*pmap_copy_t)(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+typedef void (*pmap_copy_page_t)(vm_page_t, vm_page_t);
+typedef void (*pmap_copy_pages_t)(vm_page_t *, vm_offset_t,
+ vm_page_t *, vm_offset_t, int);
+typedef int (*pmap_enter_t)(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
+ u_int, int8_t);
+typedef void (*pmap_enter_object_t)(pmap_t, vm_offset_t, vm_offset_t,
+ vm_page_t, vm_prot_t);
+typedef void (*pmap_enter_quick_t)(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+typedef vm_paddr_t (*pmap_extract_t)(pmap_t, vm_offset_t);
+typedef vm_page_t (*pmap_extract_and_hold_t)(pmap_t, vm_offset_t, vm_prot_t);
+typedef void (*pmap_growkernel_t)(vm_offset_t);
+typedef void (*pmap_init_t)(void);
+typedef boolean_t (*pmap_is_modified_t)(vm_page_t);
+typedef boolean_t (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t);
+typedef boolean_t (*pmap_is_referenced_t)(vm_page_t);
+typedef int (*pmap_ts_referenced_t)(vm_page_t);
+typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
+typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t,
+ vm_pindex_t, vm_size_t);
+typedef boolean_t (*pmap_page_exists_quick_t)(pmap_t, vm_page_t);
+typedef boolean_t (*pmap_page_is_mapped_t)(vm_page_t);
+typedef void (*pmap_page_init_t)(vm_page_t);
+typedef int (*pmap_page_wired_mappings_t)(vm_page_t);
+typedef int (*pmap_pinit_t)(pmap_t);
+typedef void (*pmap_pinit0_t)(pmap_t);
+typedef void (*pmap_protect_t)(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+typedef void (*pmap_qenter_t)(vm_offset_t, vm_page_t *, int);
+typedef void (*pmap_qremove_t)(vm_offset_t, int);
+typedef void (*pmap_release_t)(pmap_t);
+typedef void (*pmap_remove_t)(pmap_t, vm_offset_t, vm_offset_t);
+typedef void (*pmap_remove_all_t)(vm_page_t);
+typedef void (*pmap_remove_pages_t)(pmap_t);
+typedef void (*pmap_unwire_t)(pmap_t, vm_offset_t, vm_offset_t);
+typedef void (*pmap_zero_page_t)(vm_page_t);
+typedef void (*pmap_zero_page_area_t)(vm_page_t, int, int);
+typedef int (*pmap_mincore_t)(pmap_t, vm_offset_t, vm_paddr_t *);
+typedef void (*pmap_activate_t)(struct thread *);
+typedef void (*pmap_deactivate_t)(struct thread *);
+typedef void (*pmap_align_superpage_t)(vm_object_t, vm_ooffset_t,
+ vm_offset_t *, vm_size_t);
-struct mmu_kobj {
- /*
- * An MMU instance is a kernel object
- */
- KOBJ_FIELDS;
+typedef void (*pmap_sync_icache_t)(pmap_t, vm_offset_t, vm_size_t);
+typedef void (*pmap_dumpsys_map_chunk_t)(vm_paddr_t, size_t, void **);
+typedef void (*pmap_dumpsys_unmap_chunk_t)(vm_paddr_t, size_t, void *);
+typedef void (*pmap_dumpsys_pa_init_t)(void);
+typedef size_t (*pmap_dumpsys_scan_pmap_t)(void);
+typedef void *(*pmap_dumpsys_dump_pmap_init_t)(unsigned);
+typedef void *(*pmap_dumpsys_dump_pmap_t)(void *, void *, u_long *);
+typedef vm_offset_t (*pmap_quick_enter_page_t)(vm_page_t);
+typedef void (*pmap_quick_remove_page_t)(vm_offset_t);
+typedef bool (*pmap_ps_enabled_t)(pmap_t);
+typedef void (*pmap_tlbie_all_t)(void);
+typedef void (*pmap_installer_t)(void);
- /*
- * Utility elements that an instance may use
- */
- struct mtx mmu_mtx; /* available for instance use */
- void *mmu_iptr; /* instance data pointer */
+struct pmap_funcs {
+ pmap_installer_t install;
+ pmap_bootstrap_t bootstrap;
+ pmap_cpu_bootstrap_t cpu_bootstrap;
+ pmap_kenter_t kenter;
+ pmap_kenter_attr_t kenter_attr;
+ pmap_kremove_t kremove;
+ pmap_mapdev_t mapdev;
+ pmap_mapdev_attr_t mapdev_attr;
+ pmap_unmapdev_t unmapdev;
+ pmap_page_set_memattr_t page_set_memattr;
+ pmap_change_attr_t change_attr;
+ pmap_map_user_ptr_t map_user_ptr;
+ pmap_decode_kernel_ptr_t decode_kernel_ptr;
+ pmap_kextract_t kextract;
+ pmap_dev_direct_mapped_t dev_direct_mapped;
+ pmap_advise_t advise;
+ pmap_clear_modify_t clear_modify;
+ pmap_remove_write_t remove_write;
+ pmap_copy_t copy;
+ pmap_copy_page_t copy_page;
+ pmap_copy_pages_t copy_pages;
+ pmap_enter_t enter;
+ pmap_enter_object_t enter_object;
+ pmap_enter_quick_t enter_quick;
+ pmap_extract_t extract;
+ pmap_extract_and_hold_t extract_and_hold;
+ pmap_growkernel_t growkernel;
+ pmap_init_t init;
+ pmap_is_modified_t is_modified;
+ pmap_is_prefaultable_t is_prefaultable;
+ pmap_is_referenced_t is_referenced;
+ pmap_ts_referenced_t ts_referenced;
+ pmap_page_is_mapped_t page_is_mapped;
+ pmap_ps_enabled_t ps_enabled;
+ pmap_map_t map;
+ pmap_object_init_pt_t object_init_pt;
+ pmap_page_exists_quick_t page_exists_quick;
+ pmap_page_init_t page_init;
+ pmap_page_wired_mappings_t page_wired_mappings;
+ pmap_pinit_t pinit;
+ pmap_pinit0_t pinit0;
+ pmap_protect_t protect;
+ pmap_qenter_t qenter;
+ pmap_qremove_t qremove;
+ pmap_release_t release;
+ pmap_remove_t remove;
+ pmap_remove_all_t remove_all;
+ pmap_remove_pages_t remove_pages;
+ pmap_unwire_t unwire;
+ pmap_zero_page_t zero_page;
+ pmap_zero_page_area_t zero_page_area;
+ pmap_mincore_t mincore;
+ pmap_activate_t activate;
+ pmap_deactivate_t deactivate;
+ pmap_align_superpage_t align_superpage;
+ pmap_sync_icache_t sync_icache;
+ pmap_quick_enter_page_t quick_enter_page;
+ pmap_quick_remove_page_t quick_remove_page;
+ pmap_page_array_startup_t page_array_startup;
+ pmap_dumpsys_map_chunk_t dumpsys_map_chunk;
+ pmap_dumpsys_unmap_chunk_t dumpsys_unmap_chunk;
+ pmap_dumpsys_pa_init_t dumpsys_pa_init;
+ pmap_dumpsys_scan_pmap_t dumpsys_scan_pmap;
+ pmap_dumpsys_dump_pmap_init_t dumpsys_dump_pmap_init;
+ pmap_dumpsys_dump_pmap_t dumpsys_dump_pmap;
+ pmap_tlbie_all_t tlbie_all;
- /*
- * Opaque data that can be overlaid with an instance-private
- * structure. MMU code can test that this is large enough at
-	 * compile time with a sizeof() test against its softc. There
- * is also a run-time test when the MMU kernel object is
- * registered.
- */
-#define MMU_OPAQUESZ 64
- u_int mmu_opaque[MMU_OPAQUESZ];
+};
+struct mmu_kobj {
+ const char *name;
+ const struct mmu_kobj *base;
+ const struct pmap_funcs *funcs;
};
typedef struct mmu_kobj *mmu_t;
-typedef struct kobj_class mmu_def_t;
-#define mmu_method_t kobj_method_t
-#define MMUMETHOD KOBJMETHOD
-
-#define MMU_DEF(name, ident, methods, size) \
+#define MMU_DEF(name, ident, methods) \
\
-mmu_def_t name = { \
- ident, methods, size, NULL \
+const struct mmu_kobj name = { \
+ ident, NULL, &methods \
}; \
DATA_SET(mmu_set, name)
-#define MMU_DEF_INHERIT(name, ident, methods, size, base1) \
+#define MMU_DEF_INHERIT(name, ident, methods, base1) \
\
-static kobj_class_t name ## _baseclasses[] = \
- { &base1, NULL }; \
-mmu_def_t name = { \
- ident, methods, size, name ## _baseclasses \
-}; \
-DATA_SET(mmu_set, name)
-
-
-#if 0
-mmu_def_t name = { \
- ident, methods, size, name ## _baseclasses \
-};
+const struct mmu_kobj name = { \
+ ident, &base1, &methods, \
+}; \
DATA_SET(mmu_set, name)
-#endif
/*
* Known MMU names
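
With the kobj class gone, an MMU implementation now fills in a struct pmap_funcs with named function pointers and registers it through the three-argument MMU_DEF(). A minimal sketch of the new-style declaration, reusing the hypothetical ppc8xx/ppc860c names from the removed comment purely for illustration (the drivers actually converted by this change are moea, moea64, radix and the booke pmap):

static struct pmap_funcs ppc8xx_methods = {
	.install = ppc8xx_install,
	.bootstrap = ppc8xx_bootstrap,
	.kenter = ppc8xx_kenter,
	.kremove = ppc8xx_kremove,
	.kextract = ppc8xx_kextract,
	/* Members left unset stay NULL; with MMU_DEF_INHERIT the base
	 * class presumably supplies them. */
};

MMU_DEF(ppc8xx, MMU_TYPE_8xx, ppc8xx_methods);

/* Single-level inheritance still works, minus the softc size argument;
 * ppc860c_methods would be a second struct pmap_funcs. */
MMU_DEF_INHERIT(ppc860c, MMU_TYPE_860c, ppc860c_methods, ppc8xx);
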
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index 4719ffaacde2..edde0d73d043 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -319,6 +319,7 @@ void pmap_deactivate(struct thread *);
vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t pmap_mmu_install(char *name, int prio);
+void pmap_mmu_init(void);
const char *pmap_mmu_name(void);
bool pmap_ps_enabled(pmap_t pmap);
int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
diff --git a/sys/powerpc/powerpc/machdep.c b/sys/powerpc/powerpc/machdep.c
index 5fc049bce456..0430773a9133 100644
--- a/sys/powerpc/powerpc/machdep.c
+++ b/sys/powerpc/powerpc/machdep.c
@@ -466,9 +466,10 @@ powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
/*
* Bring up MMU
*/
+ pmap_mmu_init();
+ link_elf_ireloc(kmdp);
pmap_bootstrap(startkernel, endkernel);
mtmsr(psl_kernset & ~PSL_EE);
- link_elf_ireloc(kmdp);
/*
* Initialize params/tunables that are derived from memsize
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
deleted file mode 100644
index f57d5b437309..000000000000
--- a/sys/powerpc/powerpc/mmu_if.m
+++ /dev/null
@@ -1,1110 +0,0 @@
-#-
-# Copyright (c) 2005 Peter Grehan
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-# $FreeBSD$
-#
-
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/systm.h>
-
-#include <vm/vm.h>
-#include <vm/vm_page.h>
-
-#include <machine/mmuvar.h>
-
-/**
- * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
- * @brief A set of methods required by all MMU implementations. These
- * are basically direct call-thru's from the pmap machine-dependent
- * code.
- * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
- *@{
- */
-
-INTERFACE mmu;
-SINGLETON;
-
-#
-# Default implementations of some methods
-#
-CODE {
- static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
- vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
- {
- return;
- }
-
- static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
- {
- return;
- }
-
- static void mmu_null_init(mmu_t mmu)
- {
- return;
- }
-
- static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
- vm_offset_t va)
- {
- return (FALSE);
- }
-
- static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
- vm_offset_t addr, vm_object_t object, vm_pindex_t index,
- vm_size_t size)
- {
- return;
- }
-
- static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
- {
- return;
- }
-
- static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
- {
- return;
- }
-
- static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
- vm_paddr_t *pap)
- {
- return (0);
- }
-
- static void mmu_null_deactivate(struct thread *td)
- {
- return;
- }
-
- static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
- vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
- {
- return;
- }
-
- static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
- vm_size_t size, vm_memattr_t ma)
- {
- return MMU_MAPDEV(mmu, pa, size);
- }
-
- static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
- vm_paddr_t pa, vm_memattr_t ma)
- {
- MMU_KENTER(mmu, va, pa);
- }
-
- static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
- vm_memattr_t ma)
- {
- return;
- }
-
- static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
- vm_size_t sz, vm_memattr_t mode)
- {
- return (0);
- }
-
- static size_t mmu_null_scan_pmap(mmu_t mmu)
- {
- return (0);
- }
-
- static void *mmu_null_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
- {
- return (NULL);
- }
-
- static void * mmu_null_dump_pmap(mmu_t mmu, void *ctx, void *buf,
- u_long *nbytes)
- {
- return (NULL);
- }
-
- static boolean_t mmu_null_ps_enabled(mmu_t mmu)
- {
- return (FALSE);
- }
-};
-
-
-/**
- * @brief Apply the given advice to the specified range of addresses within
- * the given pmap. Depending on the advice, clear the referenced and/or
- * modified flags in each mapping and set the mapped page's dirty field.
- *
- * @param _pmap physical map
- * @param _start virtual range start
- * @param _end virtual range end
- * @param _advice advice to apply
- */
-METHOD void advise {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
- int _advice;
-};
-
-
-/**
- * @brief Clear the 'modified' bit on the given physical page
- *
- * @param _pg physical page
- */
-METHOD void clear_modify {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Clear the write and modified bits in each of the given
- * physical page's mappings
- *
- * @param _pg physical page
- */
-METHOD void remove_write {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Copy the address range given by the source physical map, virtual
- * address and length to the destination physical map and virtual address.
- * This routine is optional (xxx default null implementation ?)
- *
- * @param _dst_pmap destination physical map
- * @param _src_pmap source physical map
- * @param _dst_addr destination virtual address
- * @param _len size of range
- * @param _src_addr source virtual address
- */
-METHOD void copy {
- mmu_t _mmu;
- pmap_t _dst_pmap;
- pmap_t _src_pmap;
- vm_offset_t _dst_addr;
- vm_size_t _len;
- vm_offset_t _src_addr;
-} DEFAULT mmu_null_copy;
-
-
-/**
- * @brief Copy the source physical page to the destination physical page
- *
- * @param _src source physical page
- * @param _dst destination physical page
- */
-METHOD void copy_page {
- mmu_t _mmu;
- vm_page_t _src;
- vm_page_t _dst;
-};
-
-METHOD void copy_pages {
- mmu_t _mmu;
- vm_page_t *_ma;
- vm_offset_t _a_offset;
- vm_page_t *_mb;
- vm_offset_t _b_offset;
- int _xfersize;
-};
-
-/**
- * @brief Create a mapping between a virtual/physical address pair in the
- * passed physical map with the specified protection and wiring
- *
- * @param _pmap physical map
- * @param _va mapping virtual address
- * @param _p mapping physical page
- * @param _prot mapping page protection
- * @param _flags pmap_enter flags
- * @param _psind superpage size index
- */
-METHOD int enter {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
- vm_page_t _p;
- vm_prot_t _prot;
- u_int _flags;
- int8_t _psind;
-};
-
-
-/**
- * @brief Maps a sequence of resident pages belonging to the same object.
- *
- * @param _pmap physical map
- * @param _start virtual range start
- * @param _end virtual range end
- * @param _m_start physical page mapped at start
- * @param _prot mapping page protection
- */
-METHOD void enter_object {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
- vm_page_t _m_start;
- vm_prot_t _prot;
-};
-
-
-/**
- * @brief A faster entry point for page mapping where it is possible
- * to short-circuit some of the tests in pmap_enter.
- *
- * @param _pmap physical map (and also currently active pmap)
- * @param _va mapping virtual address
- * @param _pg mapping physical page
- * @param _prot new page protection - used to see if page is exec.
- */
-METHOD void enter_quick {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
- vm_page_t _pg;
- vm_prot_t _prot;
-};
-
-
-/**
- * @brief Reverse map the given virtual address, returning the physical
- * page associated with the address if a mapping exists.
- *
- * @param _pmap physical map
- * @param _va mapping virtual address
- *
- * @retval 0 No mapping found
- * @retval addr The mapping physical address
- */
-METHOD vm_paddr_t extract {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
-};
-
-
-/**
- * @brief Reverse map the given virtual address, returning the
- * physical page if found. The page must be held (by calling
- * vm_page_hold) if the page protection matches the given protection
- *
- * @param _pmap physical map
- * @param _va mapping virtual address
- * @param _prot protection used to determine if physical page
- * should be locked
- *
- * @retval NULL No mapping found
- * @retval page Pointer to physical page. Held if protections match
- */
-METHOD vm_page_t extract_and_hold {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
- vm_prot_t _prot;
-};
-
-
-/**
- * @brief Increase kernel virtual address space to the given virtual address.
- * Not really required for PowerPC, so optional unless the MMU implementation
- * can use it.
- *
- * @param _va new upper limit for kernel virtual address space
- */
-METHOD void growkernel {
- mmu_t _mmu;
- vm_offset_t _va;
-} DEFAULT mmu_null_growkernel;
-
-
-/**
- * @brief Called from vm_mem_init. Zone allocation is available at
- * this stage so a convenient time to create zones. This routine is
- * for MMU-implementation convenience and is optional.
- */
-METHOD void init {
- mmu_t _mmu;
-} DEFAULT mmu_null_init;
-
-
-/**
- * @brief Return if the page has been marked by MMU hardware to have been
- * modified
- *
- * @param _pg physical page to test
- *
- * @retval boolean TRUE if page has been modified
- */
-METHOD boolean_t is_modified {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Return whether the specified virtual address is a candidate to be
- * prefaulted in. This routine is optional.
- *
- * @param _pmap physical map
- * @param _va virtual address to test
- *
- * @retval boolean TRUE if the address is a candidate.
- */
-METHOD boolean_t is_prefaultable {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _va;
-} DEFAULT mmu_null_is_prefaultable;
-
-
-/**
- * @brief Return whether or not the specified physical page was referenced
- * in any physical maps.
- *
- * @params _pg physical page
- *
- * @retval boolean TRUE if page has been referenced
- */
-METHOD boolean_t is_referenced {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Return a count of referenced bits for a page, clearing those bits.
- * Not all referenced bits need to be cleared, but it is necessary that 0
- * only be returned when there are none set.
- *
- * @params _m physical page
- *
- * @retval int count of referenced bits
- */
-METHOD int ts_referenced {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Map the requested physical address range into kernel virtual
- * address space. The value in _virt is taken as a hint. The virtual
- * address of the range is returned, or NULL if the mapping could not
- * be created. The range can be direct-mapped if that is supported.
- *
- * @param *_virt Hint for start virtual address, and also return
- * value
- * @param _start physical address range start
- * @param _end physical address range end
- * @param _prot protection of range (currently ignored)
- *
- * @retval NULL could not map the area
- * @retval addr, *_virt mapping start virtual address
- */
-METHOD vm_offset_t map {
- mmu_t _mmu;
- vm_offset_t *_virt;
- vm_paddr_t _start;
- vm_paddr_t _end;
- int _prot;
-};
-
-
-/**
- * @brief Used to create a contiguous set of read-only mappings for a
- * given object to try and eliminate a cascade of on-demand faults as
- * the object is accessed sequentially. This routine is optional.
- *
- * @param _pmap physical map
- * @param _addr mapping start virtual address
- * @param _object device-backed V.M. object to be mapped
- * @param _pindex page-index within object of mapping start
- * @param _size size in bytes of mapping
- */
-METHOD void object_init_pt {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _addr;
- vm_object_t _object;
- vm_pindex_t _pindex;
- vm_size_t _size;
-} DEFAULT mmu_null_object_init_pt;
-
-
-/**
- * @brief Used to determine if the specified page has a mapping for the
- * given physical map, by scanning the list of reverse-mappings from the
- * page. The list is scanned to a maximum of 16 entries.
- *
- * @param _pmap physical map
- * @param _pg physical page
- *
- * @retval bool TRUE if the physical map was found in the first 16
- * reverse-map list entries off the physical page.
- */
-METHOD boolean_t page_exists_quick {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Initialise the machine-dependent section of the physical page
- * data structure. This routine is optional.
- *
- * @param _pg physical page
- */
-METHOD void page_init {
- mmu_t _mmu;
- vm_page_t _pg;
-} DEFAULT mmu_null_page_init;
-
-
-/**
- * @brief Count the number of managed mappings to the given physical
- * page that are wired.
- *
- * @param _pg physical page
- *
- * @retval int the number of wired, managed mappings to the
- * given physical page
- */
-METHOD int page_wired_mappings {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Initialise a physical map data structure
- *
- * @param _pmap physical map
- */
-METHOD void pinit {
- mmu_t _mmu;
- pmap_t _pmap;
-};
-
-
-/**
- * @brief Initialise the physical map for process 0, the initial process
- * in the system.
- * XXX default to pinit ?
- *
- * @param _pmap physical map
- */
-METHOD void pinit0 {
- mmu_t _mmu;
- pmap_t _pmap;
-};
-
-
-/**
- * @brief Set the protection for physical pages in the given virtual address
- * range to the given value.
- *
- * @param _pmap physical map
- * @param _start virtual range start
- * @param _end virtual range end
- * @param _prot new page protection
- */
-METHOD void protect {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
- vm_prot_t _prot;
-};
-
-
-/**
- * @brief Create a mapping in kernel virtual address space for the given array
- * of wired physical pages.
- *
- * @param _start mapping virtual address start
- * @param *_m array of physical page pointers
- * @param _count array elements
- */
-METHOD void qenter {
- mmu_t _mmu;
- vm_offset_t _start;
- vm_page_t *_pg;
- int _count;
-};
-
-
-/**
- * @brief Remove the temporary mappings created by qenter.
- *
- * @param _start mapping virtual address start
- * @param _count number of pages in mapping
- */
-METHOD void qremove {
- mmu_t _mmu;
- vm_offset_t _start;
- int _count;
-};
-
-
-/**
- * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
- * should be no existing mappings for the physical map at this point
- *
- * @param _pmap physical map
- */
-METHOD void release {
- mmu_t _mmu;
- pmap_t _pmap;
-};
-
-
-/**
- * @brief Remove all mappings in the given physical map for the start/end
- * virtual address range. The range will be page-aligned.
- *
- * @param _pmap physical map
- * @param _start mapping virtual address start
- * @param _end mapping virtual address end
- */
-METHOD void remove {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
-};
-
-
-/**
- * @brief Traverse the reverse-map list off the given physical page and
- * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
- *
- * @param _pg physical page
- */
-METHOD void remove_all {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Remove all mappings in the given start/end virtual address range
- * for the given physical map. Similar to the remove method, but it used
- * when tearing down all mappings in an address space. This method is
- * optional, since pmap_remove will be called for each valid vm_map in
- * the address space later.
- *
- * @param _pmap physical map
- * @param _start mapping virtual address start
- * @param _end mapping virtual address end
- */
-METHOD void remove_pages {
- mmu_t _mmu;
- pmap_t _pmap;
-} DEFAULT mmu_null_remove_pages;
-
-
-/**
- * @brief Clear the wired attribute from the mappings for the specified range
- * of addresses in the given pmap.
- *
- * @param _pmap physical map
- * @param _start virtual range start
- * @param _end virtual range end
- */
-METHOD void unwire {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _start;
- vm_offset_t _end;
-};
-
-
-/**
- * @brief Zero a physical page. It is not assumed that the page is mapped,
- * so a temporary (or direct) mapping may need to be used.
- *
- * @param _pg physical page
- */
-METHOD void zero_page {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-
-/**
- * @brief Zero a portion of a physical page, starting at a given offset and
- * for a given size (multiples of 512 bytes for 4k pages).
- *
- * @param _pg physical page
- * @param _off byte offset from start of page
- * @param _size size of area to zero
- */
-METHOD void zero_page_area {
- mmu_t _mmu;
- vm_page_t _pg;
- int _off;
- int _size;
-};
-
-
-/**
- * @brief Extract mincore(2) information from a mapping.
- *
- * @param _pmap physical map
- * @param _addr page virtual address
- * @param _pa page physical address
- *
- * @retval 0 no result
- * @retval non-zero mincore(2) flag values
- */
-METHOD int mincore {
- mmu_t _mmu;
- pmap_t _pmap;
- vm_offset_t _addr;
- vm_paddr_t *_pap;
-} DEFAULT mmu_null_mincore;
-
-
-/**
- * @brief Perform any operations required to allow a physical map to be used
- * before it's address space is accessed.
- *
- * @param _td thread associated with physical map
- */
-METHOD void activate {
- mmu_t _mmu;
- struct thread *_td;
-};
-
-/**
- * @brief Perform any operations required to deactivate a physical map,
- * for instance as it is context-switched out.
- *
- * @param _td thread associated with physical map
- */
-METHOD void deactivate {
- mmu_t _mmu;
- struct thread *_td;
-} DEFAULT mmu_null_deactivate;
-
-/**
- * @brief Return a hint for the best virtual address to map a tentative
- * virtual address range in a given VM object. The default is to just
- * return the given tentative start address.
- *
- * @param _obj VM backing object
- * @param _offset starting offset with the VM object
- * @param _addr initial guess at virtual address
- * @param _size size of virtual address range
- */
-METHOD void align_superpage {
- mmu_t _mmu;
- vm_object_t _obj;
- vm_ooffset_t _offset;
- vm_offset_t *_addr;
- vm_size_t _size;
-} DEFAULT mmu_null_align_superpage;
-
-
-
-
-/**
- * INTERNAL INTERFACES
- */
-
-/**
- * @brief Bootstrap the VM system. At the completion of this routine, the
- * kernel will be running in its own address space with full control over
- * paging.
- *
- * @param _start start of reserved memory (obsolete ???)
- * @param _end end of reserved memory (obsolete ???)
- * XXX I think the intent of these was to allow
- * the memory used by kernel text+data+bss and
- * loader variables/load-time kld's to be carved out
- * of available physical mem.
- *
- */
-METHOD void bootstrap {
- mmu_t _mmu;
- vm_offset_t _start;
- vm_offset_t _end;
-};
-
-/**
- * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
- * for alternate CPUs on SMP systems.
- *
- * @param _ap Set to 1 if the CPU being set up is an AP
- *
- */
-METHOD void cpu_bootstrap {
- mmu_t _mmu;
- int _ap;
-};
-
-
-/**
- * @brief Create a kernel mapping for a given physical address range.
- * Called by bus code on behalf of device drivers. The mapping does not
- * have to be a virtual address: it can be a direct-mapped physical address
- * if that is supported by the MMU.
- *
- * @param _pa start physical address
- * @param _size size in bytes of mapping
- *
- * @retval addr address of mapping.
- */
-METHOD void * mapdev {
- mmu_t _mmu;
- vm_paddr_t _pa;
- vm_size_t _size;
-};
-
-/**
- * @brief Create a kernel mapping for a given physical address range.
- * Called by bus code on behalf of device drivers. The mapping does not
- * have to be a virtual address: it can be a direct-mapped physical address
- * if that is supported by the MMU.
- *
- * @param _pa start physical address
- * @param _size size in bytes of mapping
- * @param _attr cache attributes
- *
- * @retval addr address of mapping.
- */
-METHOD void * mapdev_attr {
- mmu_t _mmu;
- vm_paddr_t _pa;
- vm_size_t _size;
- vm_memattr_t _attr;
-} DEFAULT mmu_null_mapdev_attr;
-
-/**
- * @brief Change cache control attributes for a page. Should modify all
- * mappings for that page.
- *
- * @param _m page to modify
- * @param _ma new cache control attributes
- */
-METHOD void page_set_memattr {
- mmu_t _mmu;
- vm_page_t _pg;
- vm_memattr_t _ma;
-} DEFAULT mmu_null_page_set_memattr;
-
-/**
- * @brief Remove the mapping created by mapdev. Called when a driver
- * is unloaded.
- *
- * @param _va Mapping address returned from mapdev
- * @param _size size in bytes of mapping
- */
-METHOD void unmapdev {
- mmu_t _mmu;
- vm_offset_t _va;
- vm_size_t _size;
-};
-
-/**
- * @brief Provide a kernel-space pointer that can be used to access the
- * given userland address. The kernel accessible length returned in klen
- * may be less than the requested length of the userland buffer (ulen). If
- * so, retry with a higher address to get access to the later parts of the
- * buffer. Returns EFAULT if no mapping can be made, else zero.
- *
- * @param _pm PMAP for the user pointer.
- * @param _uaddr Userland address to map.
- * @param _kaddr Corresponding kernel address.
- * @param _ulen Length of user buffer.
- * @param _klen Available subset of ulen with _kaddr.
- */
-METHOD int map_user_ptr {
- mmu_t _mmu;
- pmap_t _pm;
- volatile const void *_uaddr;
- void **_kaddr;
- size_t _ulen;
- size_t *_klen;
-};
-
-/**
- * @brief Decode a kernel pointer, as visible to the current thread,
- * by setting whether it corresponds to a user or kernel address and
- * the address in the respective memory maps to which the address as
- * seen in the kernel corresponds. This is essentially the inverse of
- * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
- * Returns 0 on success or EFAULT if the address could not be mapped.
- */
-METHOD int decode_kernel_ptr {
- mmu_t _mmu;
- vm_offset_t addr;
- int *is_user;
- vm_offset_t *decoded_addr;
-};
-
-/**
- * @brief Reverse-map a kernel virtual address
- *
- * @param _va kernel virtual address to reverse-map
- *
- * @retval pa physical address corresponding to mapping
- */
-METHOD vm_paddr_t kextract {
- mmu_t _mmu;
- vm_offset_t _va;
-};
-
-
-/**
- * @brief Map a wired page into kernel virtual address space
- *
- * @param _va mapping virtual address
- * @param _pa mapping physical address
- */
-METHOD void kenter {
- mmu_t _mmu;
- vm_offset_t _va;
- vm_paddr_t _pa;
-};
-
-/**
- * @brief Map a wired page into kernel virtual address space
- *
- * @param _va mapping virtual address
- * @param _pa mapping physical address
- * @param _ma mapping cache control attributes
- */
-METHOD void kenter_attr {
- mmu_t _mmu;
- vm_offset_t _va;
- vm_paddr_t _pa;
- vm_memattr_t _ma;
-} DEFAULT mmu_null_kenter_attr;
-
-/**
- * @brief Unmap a wired page from kernel virtual address space
- *
- * @param _va mapped virtual address
- */
-METHOD void kremove {
- mmu_t _mmu;
- vm_offset_t _va;
-};
-
-/**
- * @brief Determine if the given physical address range has been direct-mapped.
- *
- * @param _pa physical address start
- * @param _size physical address range size
- *
- * @retval bool TRUE if the range is direct-mapped.
- */
-METHOD boolean_t dev_direct_mapped {
- mmu_t _mmu;
- vm_paddr_t _pa;
- vm_size_t _size;
-};
-
-
-/**
- * @brief Enforce instruction cache coherency. Typically called after a
- * region of memory has been modified and before execution of or within
- * that region is attempted. Setting breakpoints in a process through
- * ptrace(2) is one example of when the instruction cache needs to be
- * made coherent.
- *
- * @param _pm the physical map of the virtual address
- * @param _va the virtual address of the modified region
- * @param _sz the size of the modified region
- */
-METHOD void sync_icache {
- mmu_t _mmu;
- pmap_t _pm;
- vm_offset_t _va;
- vm_size_t _sz;
-};
-
-
-/**
- * @brief Create temporary memory mapping for use by dumpsys().
- *
- * @param _pa The physical page to map.
- * @param _sz The requested size of the mapping.
- * @param _va The virtual address of the mapping.
- */
-METHOD void dumpsys_map {
- mmu_t _mmu;
- vm_paddr_t _pa;
- size_t _sz;
- void **_va;
-};
-
-
-/**
- * @brief Remove temporary dumpsys() mapping.
- *
- * @param _pa The physical page to map.
- * @param _sz The requested size of the mapping.
- * @param _va The virtual address of the mapping.
- */
-METHOD void dumpsys_unmap {
- mmu_t _mmu;
- vm_paddr_t _pa;
- size_t _sz;
- void *_va;
-};
-
-
-/**
- * @brief Initialize memory chunks for dumpsys.
- */
-METHOD void scan_init {
- mmu_t _mmu;
-};
-
-/**
- * @brief Scan kernel PMAP, adding mapped physical pages to dump.
- *
- * @retval pmap_size Number of bytes used by all PTE entries.
- */
-METHOD size_t scan_pmap {
- mmu_t _mmu;
-} DEFAULT mmu_null_scan_pmap;
-
-/**
- * @brief Initialize a PMAP dump.
- *
- * @param _blkpgs Size of a dump block, in pages.
- *
- * @retval ctx Dump context, used by dump_pmap.
- */
-METHOD void * dump_pmap_init {
- mmu_t _mmu;
- unsigned _blkpgs;
-} DEFAULT mmu_null_dump_pmap_init;
-
-/**
- * @brief Dump a block of PTEs.
- * The size of the dump block is specified in dump_pmap_init and
- * the 'buf' argument must be big enough to hold a full block.
- * If the page table resides in regular memory, then the 'buf'
- * argument is ignored and a pointer to the specified dump block
- * is returned instead, avoiding memory copy. Else, the buffer is
- * filled with PTEs and the own buffer pointer is returned.
- * In the end, the cursor in 'ctx' is adjusted to point to the next block.
- *
- * @param _ctx Dump context, retrieved from dump_pmap_init.
- * @param _buf Buffer to hold the dump block contents.
- * @param _nbytes Number of bytes dumped.
- *
- * @retval NULL No more blocks to dump.
- * @retval buf Pointer to dumped data (may be different than _buf).
- */
-METHOD void * dump_pmap {
- mmu_t _mmu;
- void *_ctx;
- void *_buf;
- u_long *_nbytes;
-} DEFAULT mmu_null_dump_pmap;
-
-/**
- * @brief Create a temporary thread-local KVA mapping of a single page.
- *
- * @param _pg The physical page to map
- *
- * @retval addr The temporary KVA
- */
-METHOD vm_offset_t quick_enter_page {
- mmu_t _mmu;
- vm_page_t _pg;
-};
-
-/**
- * @brief Undo a mapping created by quick_enter_page
- *
- * @param _va The mapped KVA
- */
-METHOD void quick_remove_page {
- mmu_t _mmu;
- vm_offset_t _va;
-};
-
-/**
- * @brief Change the specified virtual address range's memory type.
- *
- * @param _va The virtual base address to change
- *
- * @param _sz Size of the region to change
- *
- * @param _mode New mode to set on the VA range
- *
- * @retval error 0 on success, EINVAL or ENOMEM on error.
- */
-METHOD int change_attr {
- mmu_t _mmu;
- vm_offset_t _va;
- vm_size_t _sz;
- vm_memattr_t _mode;
-} DEFAULT mmu_null_change_attr;
-
-/**
- * @brief Initialize the page array.
- *
- * @param _pages The number of pages to be accounted by the array.
- */
-METHOD void page_array_startup {
- mmu_t _mmu;
- long _pages;
-};
-
-METHOD boolean_t page_is_mapped {
- mmu_t _mmu;
- vm_page_t _pg;
-} DEFAULT;
-
-METHOD boolean_t ps_enabled {
- mmu_t _mmu;
- pmap_t _pmap;
-} DEFAULT mmu_null_ps_enabled;
-
-
-/**
- * @brief Flush the TLB (used by machine check handler).
- */
-METHOD void tlbie_all {
- mmu_t _mmu;
-};
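
With mmu_if.m removed, pmap_dispatch.c (below) no longer compiles a kobj class at bootstrap time; instead the pmap_*() entry points are bound through the kernel's ifunc machinery, hence the new <machine/ifunc.h> include in the following hunk. A hypothetical single-method sketch of that binding, using the stock DEFINE_IFUNC macro (the commit's actual code may use a helper macro to cover base-class fallback):

DEFINE_IFUNC(, void, pmap_kenter, (vm_offset_t va, vm_paddr_t pa))
{

	/* Resolver runs once, during link_elf_ireloc(), after pmap_mmu_init(). */
	return (mmu_obj->funcs->kenter);
}
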
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index dda6c4f6ae62..5f3ae992c907 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -53,19 +53,16 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <vm/vm.h>
+#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <machine/dump.h>
+#include <machine/ifunc.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/smp.h>
-#include "mmu_if.h"
-
-static mmu_def_t *mmu_def_impl;
static mmu_t mmu_obj;
-static struct mmu_kobj mmu_kernel_obj;
-static struct kobj_ops mmu_kernel_kops;
/*
* pmap globals
@@ -93,574 +90,136 @@ pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif
-
-
-void
-pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
-{
-
- CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
- advice);
- MMU_ADVISE(mmu_obj, pmap, start, end, advice);
-}
-
-void
-pmap_clear_modify(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- MMU_CLEAR_MODIFY(mmu_obj, m);
-}
-
-void
-pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
- vm_size_t len, vm_offset_t src_addr)
-{
-
- CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
- src_pmap, dst_addr, len, src_addr);
- MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
-}
-
-void
-pmap_copy_page(vm_page_t src, vm_page_t dst)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
- MMU_COPY_PAGE(mmu_obj, src, dst);
-}
-
-void
-pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
- vm_offset_t b_offset, int xfersize)
-{
-
- CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
- a_offset, mb, b_offset, xfersize);
- MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
-}
-
-int
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
- u_int flags, int8_t psind)
-{
-
- CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %#x, %d)", pmap, va,
- p, prot, flags, psind);
- return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
-}
-
-void
-pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
- vm_page_t m_start, vm_prot_t prot)
-{
-
- CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
- end, m_start, prot);
- MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
-}
-
-void
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
-{
-
- CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
- MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
-}
-
-vm_paddr_t
-pmap_extract(pmap_t pmap, vm_offset_t va)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
- return (MMU_EXTRACT(mmu_obj, pmap, va));
-}
-
-vm_page_t
-pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
- return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
-}
-
-void
-pmap_growkernel(vm_offset_t va)
-{
-
- CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
- MMU_GROWKERNEL(mmu_obj, va);
-}
-
-void
-pmap_init(void)
-{
-
- CTR1(KTR_PMAP, "%s()", __func__);
- MMU_INIT(mmu_obj);
-}
-
-boolean_t
-pmap_is_modified(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_IS_MODIFIED(mmu_obj, m));
-}
-
-boolean_t
-pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
- return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
-}
-
-boolean_t
-pmap_is_referenced(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_IS_REFERENCED(mmu_obj, m));
-}
-
-boolean_t
-pmap_ts_referenced(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_TS_REFERENCED(mmu_obj, m));
-}
-
-vm_offset_t
-pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
-{
-
- CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
- prot);
- return (MMU_MAP(mmu_obj, virt, start, end, prot));
-}
-
-void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
- vm_pindex_t pindex, vm_size_t size)
-{
-
- CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
- object, pindex, size);
- MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
-}
-
-boolean_t
-pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
- return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
-}
-
-void
-pmap_page_init(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- MMU_PAGE_INIT(mmu_obj, m);
-}
-
-int
-pmap_page_wired_mappings(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
-}
-
-int
-pmap_pinit(pmap_t pmap)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
- MMU_PINIT(mmu_obj, pmap);
- return (1);
-}
-
-void
-pmap_pinit0(pmap_t pmap)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
- MMU_PINIT0(mmu_obj, pmap);
-}
-
-void
-pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
-{
-
- CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
- prot);
- MMU_PROTECT(mmu_obj, pmap, start, end, prot);
-}
-
-void
-pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
-{
-
- CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
- MMU_QENTER(mmu_obj, start, m, count);
-}
-
-void
-pmap_qremove(vm_offset_t start, int count)
-{
-
- CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
- MMU_QREMOVE(mmu_obj, start, count);
-}
-
-void
-pmap_release(pmap_t pmap)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
- MMU_RELEASE(mmu_obj, pmap);
-}
-
-void
-pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
- MMU_REMOVE(mmu_obj, pmap, start, end);
-}
-
-void
-pmap_remove_all(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- MMU_REMOVE_ALL(mmu_obj, m);
-}
-
-void
-pmap_remove_pages(pmap_t pmap)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
- MMU_REMOVE_PAGES(mmu_obj, pmap);
-}
-
-void
-pmap_remove_write(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- MMU_REMOVE_WRITE(mmu_obj, m);
-}
-
-void
-pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
- MMU_UNWIRE(mmu_obj, pmap, start, end);
-}
-void
-pmap_zero_page(vm_page_t m)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- MMU_ZERO_PAGE(mmu_obj, m);
-}
-
-void
-pmap_zero_page_area(vm_page_t m, int off, int size)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
- MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
-}
-
-int
-pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
- return (MMU_MINCORE(mmu_obj, pmap, addr, pap));
-}
-
-void
-pmap_activate(struct thread *td)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, td);
- MMU_ACTIVATE(mmu_obj, td);
-}
-
-void
-pmap_deactivate(struct thread *td)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, td);
- MMU_DEACTIVATE(mmu_obj, td);
-}
-
-/*
- * Increase the starting virtual address of the given mapping if a
- * different alignment might result in more superpage mappings.
- */
-void
-pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
- vm_offset_t *addr, vm_size_t size)
-{
-
- CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
- size);
- MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
-}
-
-/*
- * Routines used in machine-dependent code
- */
-void
-pmap_bootstrap(vm_offset_t start, vm_offset_t end)
-{
- mmu_obj = &mmu_kernel_obj;
-
- /*
- * Take care of compiling the selected class, and
- * then statically initialise the MMU object
- */
- kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
- kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);
-
- MMU_BOOTSTRAP(mmu_obj, start, end);
-}
-
-void
-pmap_cpu_bootstrap(int ap)
-{
- /*
- * No KTR here because our console probably doesn't work yet
- */
-
- return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
-}
-
-void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
-{
-
- CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
- return (MMU_MAPDEV(mmu_obj, pa, size));
-}
-
-void *
-pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
-{
-
- CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
- return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
-}
-
-void
-pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
-{
-
- CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
- return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
-}
-
-void
-pmap_unmapdev(vm_offset_t va, vm_size_t size)
-{
-
- CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
- MMU_UNMAPDEV(mmu_obj, va, size);
-}
-
-vm_paddr_t
-pmap_kextract(vm_offset_t va)
-{
-
- CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
- return (MMU_KEXTRACT(mmu_obj, va));
-}
-
-void
-pmap_kenter(vm_offset_t va, vm_paddr_t pa)
-{
-
- CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
- MMU_KENTER(mmu_obj, va, pa);
-}
-
-void
-pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
-{
-
- CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
- MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
-}
-
-void
-pmap_kremove(vm_offset_t va)
-{
-
- CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
- return (MMU_KREMOVE(mmu_obj, va));
-}
-
-int
-pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
- size_t ulen, size_t *klen)
-{
-
- CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
- return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
-}
-
-int
-pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
-{
-
- CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
- return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
-}
-
-boolean_t
-pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
-{
-
- CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
- return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
-}
-
-void
-pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
-{
-
- CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
- return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
-}
-
-void
-dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
-{
-
- CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
- return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
-}
-
-void
-dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
-{
-
- CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
- return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
-}
-
-void
-dumpsys_pa_init(void)
+static int
+pmap_nomethod(void)
{
-
- CTR1(KTR_PMAP, "%s()", __func__);
- return (MMU_SCAN_INIT(mmu_obj));
-}
-
-size_t
-dumpsys_scan_pmap(void)
-{
- CTR1(KTR_PMAP, "%s()", __func__);
- return (MMU_SCAN_PMAP(mmu_obj));
-}
-
-void *
-dumpsys_dump_pmap_init(unsigned blkpgs)
-{
- CTR1(KTR_PMAP, "%s()", __func__);
- return (MMU_DUMP_PMAP_INIT(mmu_obj, blkpgs));
-}
-
-void *
-dumpsys_dump_pmap(void *ctx, void *buf, u_long *nbytes)
-{
- CTR1(KTR_PMAP, "%s()", __func__);
- return (MMU_DUMP_PMAP(mmu_obj, ctx, buf, nbytes));
-}
-
-vm_offset_t
-pmap_quick_enter_page(vm_page_t m)
-{
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
-}
-
-void
-pmap_quick_remove_page(vm_offset_t addr)
-{
- CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
- MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
-}
-
-int
-pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
-{
- CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
- return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
-}
-
-void
-pmap_page_array_startup(long pages)
-{
- CTR2(KTR_PMAP, "%s(%ld)", __func__, pages);
- MMU_PAGE_ARRAY_STARTUP(mmu_obj, pages);
-}
-
-boolean_t
-pmap_page_is_mapped(vm_page_t m)
-{
- CTR2(KTR_PMAP, "%s(%p)", __func__, m);
- return (MMU_PAGE_IS_MAPPED(mmu_obj, m));
+ return (0);
}
-bool
-pmap_ps_enabled(pmap_t pmap)
-{
- CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
- return (MMU_PS_ENABLED(mmu_obj, pmap));
-}
+#define DEFINE_PMAP_IFUNC(ret, func, args) \
+ DEFINE_IFUNC(, ret, pmap_##func, args) { \
+ const struct mmu_kobj *mmu = mmu_obj; \
+ pmap_##func##_t f; \
+ do { \
+ f = mmu->funcs->func; \
+ if (f != NULL) break; \
+ mmu = mmu->base; \
+ } while (mmu != NULL); \
+ return (f != NULL ? f : (pmap_##func##_t)pmap_nomethod);\
+ }
+#define DEFINE_DUMPSYS_IFUNC(ret, func, args) \
+ DEFINE_IFUNC(, ret, dumpsys_##func, args) { \
+ const struct mmu_kobj *mmu = mmu_obj; \
+ pmap_dumpsys_##func##_t f; \
+ do { \
+ f = mmu->funcs->dumpsys_##func; \
+ if (f != NULL) break; \
+ mmu = mmu->base; \
+ } while (mmu != NULL); \
+ return (f != NULL ? f : (pmap_dumpsys_##func##_t)pmap_nomethod);\
+ }
-void
-pmap_tlbie_all(void)
-{
- CTR1(KTR_PMAP, "%s()", __func__);
- return (MMU_TLBIE_ALL(mmu_obj));
-}
+DEFINE_PMAP_IFUNC(void, activate, (struct thread *));
+DEFINE_PMAP_IFUNC(void, advise, (pmap_t, vm_offset_t, vm_offset_t, int));
+DEFINE_PMAP_IFUNC(void, align_superpage, (vm_object_t, vm_ooffset_t,
+ vm_offset_t *, vm_size_t));
+DEFINE_PMAP_IFUNC(void, clear_modify, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, copy, (pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(int, enter, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t));
+DEFINE_PMAP_IFUNC(void, enter_quick, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t));
+DEFINE_PMAP_IFUNC(void, enter_object, (pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
+ vm_prot_t));
+DEFINE_PMAP_IFUNC(vm_paddr_t, extract, (pmap_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(vm_page_t, extract_and_hold, (pmap_t, vm_offset_t, vm_prot_t));
+DEFINE_PMAP_IFUNC(void, kenter, (vm_offset_t, vm_paddr_t));
+DEFINE_PMAP_IFUNC(void, kenter_attr, (vm_offset_t, vm_paddr_t, vm_memattr_t));
+DEFINE_PMAP_IFUNC(vm_paddr_t, kextract, (vm_offset_t));
+DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t));
+DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t,
+ vm_size_t));
+DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t));
+DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t));
+DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t));
+DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t));
+DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t));
+DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
+DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t));
+DEFINE_PMAP_IFUNC(void, qenter, (vm_offset_t, vm_page_t *, int));
+DEFINE_PMAP_IFUNC(void, qremove, (vm_offset_t, int));
+DEFINE_PMAP_IFUNC(vm_offset_t, quick_enter_page, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, quick_remove_page, (vm_offset_t));
+DEFINE_PMAP_IFUNC(boolean_t, ts_referenced, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, release, (pmap_t));
+DEFINE_PMAP_IFUNC(void, remove, (pmap_t, vm_offset_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(void, remove_all, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, remove_pages, (pmap_t));
+DEFINE_PMAP_IFUNC(void, remove_write, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, unwire, (pmap_t, vm_offset_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(void, zero_page, (vm_page_t));
+DEFINE_PMAP_IFUNC(void, zero_page_area, (vm_page_t, int, int));
+DEFINE_PMAP_IFUNC(void, copy_page, (vm_page_t, vm_page_t));
+DEFINE_PMAP_IFUNC(void, copy_pages,
+ (vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize));
+DEFINE_PMAP_IFUNC(void, growkernel, (vm_offset_t));
+DEFINE_PMAP_IFUNC(void, init, (void));
+DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int));
+DEFINE_PMAP_IFUNC(int, pinit, (pmap_t));
+DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t));
+DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *));
+DEFINE_PMAP_IFUNC(void, deactivate, (struct thread *));
+DEFINE_PMAP_IFUNC(void, bootstrap, (vm_offset_t, vm_offset_t));
+DEFINE_PMAP_IFUNC(void, cpu_bootstrap, (int));
+DEFINE_PMAP_IFUNC(void *, mapdev, (vm_paddr_t, vm_size_t));
+DEFINE_PMAP_IFUNC(void *, mapdev_attr, (vm_paddr_t, vm_size_t, vm_memattr_t));
+DEFINE_PMAP_IFUNC(void, page_set_memattr, (vm_page_t, vm_memattr_t));
+DEFINE_PMAP_IFUNC(void, unmapdev, (vm_offset_t, vm_size_t));
+DEFINE_PMAP_IFUNC(int, map_user_ptr,
+ (pmap_t, volatile const void *, void **, size_t, size_t *));
+DEFINE_PMAP_IFUNC(int, decode_kernel_ptr, (vm_offset_t, int *, vm_offset_t *));
+DEFINE_PMAP_IFUNC(boolean_t, dev_direct_mapped, (vm_paddr_t, vm_size_t));
+DEFINE_PMAP_IFUNC(void, sync_icache, (pmap_t, vm_offset_t, vm_size_t));
+DEFINE_PMAP_IFUNC(int, change_attr, (vm_offset_t, vm_size_t, vm_memattr_t));
+DEFINE_PMAP_IFUNC(void, page_array_startup, (long));
+DEFINE_PMAP_IFUNC(void, tlbie_all, (void));
+
+DEFINE_DUMPSYS_IFUNC(void, map_chunk, (vm_paddr_t, size_t, void **));
+DEFINE_DUMPSYS_IFUNC(void, unmap_chunk, (vm_paddr_t, size_t, void *));
+DEFINE_DUMPSYS_IFUNC(void, pa_init, (void));
+DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (void));
+DEFINE_DUMPSYS_IFUNC(void *, dump_pmap_init, (unsigned));
+DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *));
/*
* MMU install routines. Highest priority wins, equal priority also
* overrides allowing last-set to win.
*/
-SET_DECLARE(mmu_set, mmu_def_t);
+SET_DECLARE(mmu_set, struct mmu_kobj);
boolean_t
pmap_mmu_install(char *name, int prio)
{
- mmu_def_t **mmupp, *mmup;
+ mmu_t *mmupp, mmup;
static int curr_prio = 0;
+ printf("Trying to install pmap %s\n", name);
+
/*
* Try and locate the MMU kobj corresponding to the name
*/
SET_FOREACH(mmupp, mmu_set) {
mmup = *mmupp;
+ printf("Checking %s(%p)\n", mmup->name, mmup->name);
if (mmup->name &&
!strcmp(mmup->name, name) &&
- (prio >= curr_prio || mmu_def_impl == NULL)) {
+ (prio >= curr_prio || mmu_obj == NULL)) {
+ printf("match found: %p\n", mmup);
curr_prio = prio;
- mmu_def_impl = mmup;
+ mmu_obj = mmup;
return (TRUE);
}
}
@@ -668,10 +227,18 @@ pmap_mmu_install(char *name, int prio)
return (FALSE);
}
+/* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */
+void
+pmap_mmu_init()
+{
+ if (mmu_obj->funcs->install != NULL)
+ (mmu_obj->funcs->install)();
+}
+
const char *
pmap_mmu_name(void)
{
- return (mmu_obj->ops->cls->name);
+ return (mmu_obj->name);
}
int unmapped_buf_allowed;
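
For context, each DEFINE_PMAP_IFUNC() invocation above expands to an ifunc
resolver that walks the installed mmu_kobj chain (mmu_obj, then its base)
once, at resolution time, and binds the pmap_* symbol directly to the selected
driver method, falling back to pmap_nomethod() when no driver in the chain
supplies one. The hand-expansion of a single method below is only an
illustrative sketch: the resolver body mirrors the macro above, but the exact
plumbing emitted by DEFINE_IFUNC() is approximated.

/*
 * Rough hand-expansion of DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t)).
 * The resolver runs before the kernel's first pmap_kremove() call and
 * returns the implementation the ifunc should bind to.
 */
static pmap_kremove_t
pmap_kremove_resolver(void)
{
	const struct mmu_kobj *mmu = mmu_obj;
	pmap_kremove_t f;

	do {
		f = mmu->funcs->kremove;
		if (f != NULL)
			break;
		mmu = mmu->base;
	} while (mmu != NULL);
	return (f != NULL ? f : (pmap_kremove_t)pmap_nomethod);
}

/* Approximation of the public-symbol declaration DEFINE_IFUNC() emits. */
void pmap_kremove(vm_offset_t va)
    __attribute__((ifunc("pmap_kremove_resolver")));
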
diff --git a/sys/powerpc/ps3/mmu_ps3.c b/sys/powerpc/ps3/mmu_ps3.c
index ede12958e9d5..eb410eff4587 100644
--- a/sys/powerpc/ps3/mmu_ps3.c
+++ b/sys/powerpc/ps3/mmu_ps3.c
@@ -51,8 +51,6 @@ __FBSDID("$FreeBSD$");
#include <powerpc/aim/mmu_oea64.h>
-#include "mmu_if.h"
-#include "moea64_if.h"
#include "ps3-hvcall.h"
#define VSID_HASH_MASK 0x0000007fffffffffUL
@@ -66,39 +64,47 @@ static uint64_t mps3_vas_id;
* Kernel MMU interface
*/
-static void mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+static void mps3_install(void);
+static void mps3_bootstrap(vm_offset_t kernelstart,
vm_offset_t kernelend);
-static void mps3_cpu_bootstrap(mmu_t mmup, int ap);
-static int64_t mps3_pte_synch(mmu_t, struct pvo_entry *);
-static int64_t mps3_pte_clear(mmu_t, struct pvo_entry *, uint64_t ptebit);
-static int64_t mps3_pte_unset(mmu_t, struct pvo_entry *);
-static int mps3_pte_insert(mmu_t, struct pvo_entry *);
+static void mps3_cpu_bootstrap(int ap);
+static int64_t mps3_pte_synch(struct pvo_entry *);
+static int64_t mps3_pte_clear(struct pvo_entry *, uint64_t ptebit);
+static int64_t mps3_pte_unset(struct pvo_entry *);
+static int64_t mps3_pte_insert(struct pvo_entry *);
-static mmu_method_t mps3_methods[] = {
- MMUMETHOD(mmu_bootstrap, mps3_bootstrap),
- MMUMETHOD(mmu_cpu_bootstrap, mps3_cpu_bootstrap),
-
- MMUMETHOD(moea64_pte_synch, mps3_pte_synch),
- MMUMETHOD(moea64_pte_clear, mps3_pte_clear),
- MMUMETHOD(moea64_pte_unset, mps3_pte_unset),
- MMUMETHOD(moea64_pte_insert, mps3_pte_insert),
+static struct pmap_funcs mps3_methods = {
+ .install = mps3_install,
+ .bootstrap = mps3_bootstrap,
+ .cpu_bootstrap = mps3_cpu_bootstrap,
+};
- { 0, 0 }
+static struct moea64_funcs mps3_funcs = {
+ .pte_synch = mps3_pte_synch,
+ .pte_clear = mps3_pte_clear,
+ .pte_unset = mps3_pte_unset,
+ .pte_insert = mps3_pte_insert,
};
-MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, 0, oea64_mmu);
+MMU_DEF_INHERIT(ps3_mmu, "mmu_ps3", mps3_methods, oea64_mmu);
static struct mtx mps3_table_lock;
static void
-mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+mps3_install()
+{
+ moea64_ops = &mps3_funcs;
+}
+
+static void
+mps3_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
uint64_t final_pteg_count;
mtx_init(&mps3_table_lock, "page table", NULL, MTX_DEF);
- moea64_early_bootstrap(mmup, kernelstart, kernelend);
+ moea64_early_bootstrap(kernelstart, kernelend);
/* In case we had a page table already */
lv1_destruct_virtual_address_space(0);
@@ -114,12 +120,12 @@ mps3_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);
- moea64_mid_bootstrap(mmup, kernelstart, kernelend);
- moea64_late_bootstrap(mmup, kernelstart, kernelend);
+ moea64_mid_bootstrap(kernelstart, kernelend);
+ moea64_late_bootstrap(kernelstart, kernelend);
}
static void
-mps3_cpu_bootstrap(mmu_t mmup, int ap)
+mps3_cpu_bootstrap(int ap)
{
struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
@@ -179,7 +185,7 @@ mps3_pte_synch_locked(struct pvo_entry *pvo)
}
static int64_t
-mps3_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
+mps3_pte_synch(struct pvo_entry *pvo)
{
int64_t retval;
@@ -191,7 +197,7 @@ mps3_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
}
static int64_t
-mps3_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
+mps3_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
int64_t refchg;
struct lpte pte;
@@ -217,7 +223,7 @@ mps3_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
}
static int64_t
-mps3_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
+mps3_pte_unset(struct pvo_entry *pvo)
{
int64_t refchg;
@@ -236,8 +242,8 @@ mps3_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
return (refchg & (LPTE_REF | LPTE_CHG));
}
-static int
-mps3_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
+static int64_t
+mps3_pte_insert(struct pvo_entry *pvo)
{
int result;
struct lpte pte, evicted;
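
The two converted drivers in this change (mmu_ps3 above, mmu_phyp below)
follow the same registration pattern: a pmap_funcs table registered with
MMU_DEF_INHERIT(), plus an install hook that pmap_mmu_init() runs before the
ifuncs resolve, pointing moea64_ops at the driver's private moea64_funcs
table. A minimal skeleton of that pattern is sketched here; all "myplat"
names are hypothetical, and a real driver would also supply cpu_bootstrap and
the remaining pte_* methods.

/*
 * Hypothetical skeleton of an moea64-based pmap driver under the ifunc
 * scheme (illustrative names only).
 */
static void	myplat_install(void);
static void	myplat_bootstrap(vm_offset_t, vm_offset_t);
static int64_t	myplat_pte_insert(struct pvo_entry *);

static struct pmap_funcs myplat_methods = {
	.install = myplat_install,
	.bootstrap = myplat_bootstrap,
};

static struct moea64_funcs myplat_funcs = {
	.pte_insert = myplat_pte_insert,
};

/* Register in the mmu_set linker set, inheriting from the generic oea64 pmap. */
MMU_DEF_INHERIT(myplat_mmu, "mmu_myplat", myplat_methods, oea64_mmu);

static void
myplat_install(void)
{
	/* Called from pmap_mmu_init(); hook in the private moea64 methods. */
	moea64_ops = &myplat_funcs;
}

static void
myplat_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	moea64_early_bootstrap(kernelstart, kernelend);
	/* Platform-specific page table allocation/sizing would go here. */
	moea64_mid_bootstrap(kernelstart, kernelend);
	moea64_late_bootstrap(kernelstart, kernelend);
}

static int64_t
myplat_pte_insert(struct pvo_entry *pvo)
{
	/* Insert pvo's PTE into the platform page table; 0 on success. */
	return (0);
}
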
diff --git a/sys/powerpc/pseries/mmu_phyp.c b/sys/powerpc/pseries/mmu_phyp.c
index 951db59b2245..ca4ee79275a8 100644
--- a/sys/powerpc/pseries/mmu_phyp.c
+++ b/sys/powerpc/pseries/mmu_phyp.c
@@ -54,9 +54,6 @@ __FBSDID("$FreeBSD$");
#include <powerpc/aim/mmu_oea64.h>
-#include "mmu_if.h"
-#include "moea64_if.h"
-
#include "phyp-hvcall.h"
#define MMU_PHYP_DEBUG 0
@@ -75,32 +72,32 @@ static struct rmlock mphyp_eviction_lock;
* Kernel MMU interface
*/
-static void mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
+static void mphyp_install(void);
+static void mphyp_bootstrap(vm_offset_t kernelstart,
vm_offset_t kernelend);
-static void mphyp_cpu_bootstrap(mmu_t mmup, int ap);
-static void *mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf,
+static void mphyp_cpu_bootstrap(int ap);
+static void *mphyp_dump_pmap(void *ctx, void *buf,
u_long *nbytes);
-static int64_t mphyp_pte_synch(mmu_t, struct pvo_entry *pvo);
-static int64_t mphyp_pte_clear(mmu_t, struct pvo_entry *pvo, uint64_t ptebit);
-static int64_t mphyp_pte_unset(mmu_t, struct pvo_entry *pvo);
-static int mphyp_pte_insert(mmu_t, struct pvo_entry *pvo);
-
-static mmu_method_t mphyp_methods[] = {
- MMUMETHOD(mmu_bootstrap, mphyp_bootstrap),
- MMUMETHOD(mmu_cpu_bootstrap, mphyp_cpu_bootstrap),
- MMUMETHOD(mmu_dump_pmap, mphyp_dump_pmap),
-
- MMUMETHOD(moea64_pte_synch, mphyp_pte_synch),
- MMUMETHOD(moea64_pte_clear, mphyp_pte_clear),
- MMUMETHOD(moea64_pte_unset, mphyp_pte_unset),
- MMUMETHOD(moea64_pte_insert, mphyp_pte_insert),
-
- /* XXX: pmap_copy_page, pmap_init_page with H_PAGE_INIT */
+static int64_t mphyp_pte_synch(struct pvo_entry *pvo);
+static int64_t mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit);
+static int64_t mphyp_pte_unset(struct pvo_entry *pvo);
+static int64_t mphyp_pte_insert(struct pvo_entry *pvo);
+
+static struct pmap_funcs mphyp_methods = {
+ .install = mphyp_install,
+ .bootstrap = mphyp_bootstrap,
+ .cpu_bootstrap = mphyp_cpu_bootstrap,
+ .dumpsys_dump_pmap = mphyp_dump_pmap,
+};
- { 0, 0 }
+static struct moea64_funcs mmu_phyp_funcs = {
+ .pte_synch = mphyp_pte_synch,
+ .pte_clear = mphyp_pte_clear,
+ .pte_unset = mphyp_pte_unset,
+ .pte_insert = mphyp_pte_insert,
};
-MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, 0, oea64_mmu);
+MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, oea64_mmu);
static int brokenkvm = 0;
@@ -120,7 +117,14 @@ SYSINIT(kvmbugwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_kvm_bug_warning,
NULL);
static void
-mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
+mphyp_install()
+{
+
+ moea64_ops = &mmu_phyp_funcs;
+}
+
+static void
+mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
uint64_t final_pteg_count = 0;
char buf[8];
@@ -134,7 +138,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
rm_init(&mphyp_eviction_lock, "pte eviction");
- moea64_early_bootstrap(mmup, kernelstart, kernelend);
+ moea64_early_bootstrap(kernelstart, kernelend);
root = OF_peer(0);
@@ -246,8 +250,8 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
}
}
- moea64_mid_bootstrap(mmup, kernelstart, kernelend);
- moea64_late_bootstrap(mmup, kernelstart, kernelend);
+ moea64_mid_bootstrap(kernelstart, kernelend);
+ moea64_late_bootstrap(kernelstart, kernelend);
/* Test for broken versions of KVM that don't conform to the spec */
if (phyp_hcall(H_CLEAR_MOD, 0, 0) == H_FUNCTION)
@@ -255,7 +259,7 @@ mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
}
static void
-mphyp_cpu_bootstrap(mmu_t mmup, int ap)
+mphyp_cpu_bootstrap(int ap)
{
struct slb *slb = PCPU_GET(aim.slb);
register_t seg0;
@@ -277,7 +281,7 @@ mphyp_cpu_bootstrap(mmu_t mmup, int ap)
}
static int64_t
-mphyp_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
+mphyp_pte_synch(struct pvo_entry *pvo)
{
struct lpte pte;
uint64_t junk;
@@ -296,7 +300,7 @@ mphyp_pte_synch(mmu_t mmu, struct pvo_entry *pvo)
}
static int64_t
-mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
+mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
struct rm_priotracker track;
int64_t refchg;
@@ -313,7 +317,7 @@ mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
rm_rlock(&mphyp_eviction_lock, &track);
- refchg = mphyp_pte_synch(mmu, pvo);
+ refchg = mphyp_pte_synch(pvo);
if (refchg < 0) {
rm_runlock(&mphyp_eviction_lock, &track);
return (refchg);
@@ -350,7 +354,7 @@ mphyp_pte_clear(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
}
static int64_t
-mphyp_pte_unset(mmu_t mmu, struct pvo_entry *pvo)
+mphyp_pte_unset(struct pvo_entry *pvo)
{
struct lpte pte;
uint64_t junk;
@@ -410,8 +414,8 @@ mphyp_pte_spillable_ident(uintptr_t ptegbase, struct lpte *to_evict)
return (k);
}
-static int
-mphyp_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
+static int64_t
+mphyp_pte_insert(struct pvo_entry *pvo)
{
struct rm_priotracker track;
int64_t result;
@@ -509,7 +513,7 @@ mphyp_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
}
static void *
-mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
+mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes)
{
struct dump_context *dctx;
struct lpte p, *pbuf;