author     Konstantin Belousov <kib@FreeBSD.org>  2019-10-16 07:09:15 +0000
committer  Konstantin Belousov <kib@FreeBSD.org>  2019-10-16 07:09:15 +0000
commit     2a499f92ba5f184036e6455fda2ba8a05e153d73
tree       e0f49296aea6f95fd5fdab5bbba55417727cbb04
parent     0c4f60b734311ebe2d1b23b2937f0418c3fb8704
Fix assert in PowerPC pmaps after introduction of object busy.
The VM_PAGE_OBJECT_BUSY_ASSERT() in the pmap_enter() implementation should
only be made when the code is executed as a result of pmap_enter(), not
when the same code is entered from e.g. pmap_enter_quick(). This is
relevant for all PowerPC pmap variants, because mmu_*_enter() is used as
the backend in each of them, and the assert is located there.
Add a PowerPC-private pmap_enter() flag, PMAP_ENTER_QUICK_LOCKED, to
indicate that the call is not from pmap_enter(). For quick-locked calls,
assert that the page's object is locked rather than busied.
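The same check lands in each of the three back ends (moea_enter_locked(),
moea64_enter(), mmu_booke_enter_locked()), as the diff below shows. As a
quick illustration, here is a minimal user-space sketch of the dispatch; the
vm_page/vm_object structures, the VPO_UNMANAGED value, and the two assertion
macros are stubs standing in for the kernel's, and only the
PMAP_ENTER_QUICK_LOCKED value matches the header change below:

#include <assert.h>
#include <stdbool.h>

#define VPO_UNMANAGED           0x01            /* stand-in for the real oflags bit */
#define PMAP_ENTER_QUICK_LOCKED 0x10000000      /* matches powerpc/include/pmap.h below */

struct vm_object { bool busy; bool locked; };
struct vm_page  { int oflags; struct vm_object *object; };

/* Stubs for the kernel's VM_PAGE_OBJECT_BUSY_ASSERT()/VM_OBJECT_ASSERT_LOCKED(). */
#define VM_PAGE_OBJECT_BUSY_ASSERT(m)   assert((m)->object->busy)
#define VM_OBJECT_ASSERT_LOCKED(obj)    assert((obj)->locked)

/* The pattern each mmu_*_enter() backend now applies on entry. */
static void
enter_asserts(struct vm_page *m, int flags)
{
        if ((m->oflags & VPO_UNMANAGED) == 0) {
                if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
                        /* Genuine pmap_enter(): the page's object must be busied. */
                        VM_PAGE_OBJECT_BUSY_ASSERT(m);
                else
                        /* pmap_enter_quick()/_object(): only the object lock is held. */
                        VM_OBJECT_ASSERT_LOCKED(m->object);
        }
}

int
main(void)
{
        struct vm_object obj = { .busy = true, .locked = true };
        struct vm_page m = { .oflags = 0, .object = &obj };

        enter_asserts(&m, 0);                           /* pmap_enter() path */
        enter_asserts(&m, PMAP_ENTER_QUICK_LOCKED);     /* quick path */
        return (0);
}

The flag value is machine-dependent and is chosen high enough to stay clear
of the MI PMAP_ENTER_* flag bits passed down from the VM layer.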
Reported and tested by: bdragon
Reviewed by: alc, bdragon, markj
Sponsored by: The FreeBSD Foundation
Differential revision: https://reviews.freebsd.org/D22041
Notes:
    svn path=/head/; revision=353622
-rw-r--r--   sys/powerpc/aim/mmu_oea.c   | 13
-rw-r--r--   sys/powerpc/aim/mmu_oea64.c | 13
-rw-r--r--   sys/powerpc/booke/pmap.c    | 14
-rw-r--r--   sys/powerpc/include/pmap.h  |  2
4 files changed, 29 insertions(+), 13 deletions(-)
diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index 145873fe7100..1cc4520e1269 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -1149,8 +1149,12 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (pmap_bootstrapped)
 		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		VM_PAGE_OBJECT_BUSY_ASSERT(m);
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+			VM_PAGE_OBJECT_BUSY_ASSERT(m);
+		else
+			VM_OBJECT_ASSERT_LOCKED(m->object);
+	}
 
 	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) {
 		pvo_head = &moea_pvo_kunmanaged;
@@ -1218,7 +1222,8 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
+		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED,
+		    0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -1233,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    0, 0);
+	    PMAP_ENTER_QUICK_LOCKED, 0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index ed78a0ccdb77..8ccc875bda45 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1406,8 +1406,12 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 	uint64_t pte_lo;
 	int error;
 
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		VM_PAGE_OBJECT_BUSY_ASSERT(m);
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+			VM_PAGE_OBJECT_BUSY_ASSERT(m);
+		else
+			VM_OBJECT_ASSERT_LOCKED(m->object);
+	}
 
 	pvo = alloc_pvo_entry(0);
 	if (pvo == NULL)
@@ -1548,7 +1552,8 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 	m = m_start;
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
-		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
+		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
+		    PMAP_ENTER_QUICK_LOCKED, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 }
@@ -1559,7 +1564,7 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
 {
 
 	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    PMAP_ENTER_NOSLEEP, 0);
+	    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
 }
 
 vm_paddr_t
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 126f96d0bd11..5560477b5b37 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -2278,8 +2278,12 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		KASSERT((va <= VM_MAXUSER_ADDRESS),
 		    ("mmu_booke_enter_locked: user pmap, non user va"));
 	}
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		VM_PAGE_OBJECT_BUSY_ASSERT(m);
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		if ((pmap_flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+			VM_PAGE_OBJECT_BUSY_ASSERT(m);
+		else
+			VM_OBJECT_ASSERT_LOCKED(m->object);
+	}
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
@@ -2447,7 +2451,7 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-		    PMAP_ENTER_NOSLEEP, 0);
+		    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
 		m = TAILQ_NEXT(m, listq);
 	}
 	rw_wunlock(&pvh_global_lock);
@@ -2462,8 +2466,8 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	mmu_booke_enter_locked(mmu, pmap, va, m,
-	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
-	    0);
+	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
+	    PMAP_ENTER_QUICK_LOCKED, 0);
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index a3910e03df79..bb58754cb282 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -80,6 +80,8 @@
 struct pmap;
 typedef struct pmap *pmap_t;
 
+#define	PMAP_ENTER_QUICK_LOCKED	0x10000000
+
 #if !defined(NPMAPS)
 #define	NPMAPS		32768
 #endif /* !defined(NPMAPS) */