author		Alan Cox <alc@FreeBSD.org>	2018-07-14 20:14:00 +0000
committer	Alan Cox <alc@FreeBSD.org>	2018-07-14 20:14:00 +0000
commit		afeed44dc57e9aef89d97cf52dbf27ae2ae0e873 (patch)
tree		23bed2f8035749fc0123511a2f303172ecf3e862
parent		60e4fb3a3fe64cea442720a8c7a8e2024b2cb92e (diff)
Invalidate the mapping before updating its physical address.
Doing so ensures that all threads sharing the pmap have a consistent view of
the mapping.  This fixes the problem described in the commit log message for
r329254 without the overhead of an extra page fault in the common case.  (Now
that all pmap_enter() implementations are similarly modified, the workaround
added in r329254 can be removed, reducing the overhead of COW faults.)

With this change we can reuse the PV entry from the old mapping, potentially
avoiding a call to reclaim_pv_chunk().  Otherwise, there is nothing preventing
the old PV entry from being reclaimed.  In rare cases this could result in the
PTE's page table page being freed, leading to a use-after-free of the page
when the updated PTE is written following the allocation of the PV entry for
the new mapping.

Reviewed by:	br, markj
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D16261
Notes:
    svn path=/head/; revision=336294
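
The key change visible in the diff below is one of ordering: when pmap_enter()
replaces an existing mapping with one for a different physical page, the old
PTE is now cleared and the TLB entry invalidated before the new PTE is written,
and the PV entry detached from the old page is recycled for the new one.  The
following is a rough, hypothetical sketch of that managed-to-managed
replacement path, not the committed code: locking, dirty/referenced bit
propagation, wired/unmanaged handling, and error paths are omitted, and
enter_replace_sketch() is an illustrative name that does not exist in the
tree.  Only primitives that appear in the diff (pmap_load_clear(),
pmap_pvh_remove(), pmap_invalidate_page(), pmap_load_store(), PTE_SYNC()) are
used.

	static void
	enter_replace_sketch(pmap_t pmap, vm_offset_t va, pt_entry_t *l3,
	    pt_entry_t new_l3, vm_page_t new_m)
	{
		pt_entry_t orig_l3;
		vm_paddr_t opa;
		vm_page_t om;
		pv_entry_t pv;

		/* Atomically clear the PTE so no thread can use the stale PA. */
		orig_l3 = pmap_load_clear(l3);
		opa = PTE_TO_PHYS(orig_l3);
		om = PHYS_TO_VM_PAGE(opa);

		/* Detach the PV entry from the old page and keep it for reuse. */
		pv = pmap_pvh_remove(&om->md, pmap, va);

		/* Shoot down any cached translation of the old mapping. */
		pmap_invalidate_page(pmap, va);

		/* Reuse the PV entry for the new page; pv->pv_va is still va. */
		TAILQ_INSERT_TAIL(&new_m->md.pv_list, pv, pv_next);
		new_m->md.pv_gen++;

		/* Only now install the PTE that maps the new physical page. */
		pmap_load_store(l3, new_l3);
		PTE_SYNC(l3);
	}

Because the mapping is invalid for the entire window in which the PV lists are
updated, another thread faulting on va sees either the old mapping or no
mapping at all, never a half-updated one, and no new PV entry has to be
allocated (so reclaim_pv_chunk() cannot run) between clearing the old PTE and
writing the new one.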
-rw-r--r--	sys/riscv/riscv/pmap.c	71
1 file changed, 49 insertions(+), 22 deletions(-)
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index ddae601aa070..270b8d56d354 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2040,6 +2040,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
new_l3 |= (pn << PTE_PPN0_S);
if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= PTE_SW_WIRED;
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ new_l3 |= PTE_SW_MANAGED;
CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
@@ -2109,9 +2111,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
}
- om = NULL;
orig_l3 = pmap_load(l3);
opa = PTE_TO_PHYS(orig_l3);
+ pv = NULL;
/*
* Is the specified virtual address already mapped?
@@ -2148,7 +2150,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* No, might be a protection or wiring change.
*/
if ((orig_l3 & PTE_SW_MANAGED) != 0) {
- new_l3 |= PTE_SW_MANAGED;
if (pmap_is_write(new_l3))
vm_page_aflag_set(m, PGA_WRITEABLE);
}
@@ -2158,6 +2159,42 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/* Flush the cache, there might be uncommitted data in it */
if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
cpu_dcache_wb_range(va, L3_SIZE);
+
+ /*
+ * The physical page has changed. Temporarily invalidate
+ * the mapping. This ensures that all threads sharing the
+ * pmap keep a consistent view of the mapping, which is
+ * necessary for the correct handling of COW faults. It
+ * also permits reuse of the old mapping's PV entry,
+ * avoiding an allocation.
+ *
+ * For consistency, handle unmanaged mappings the same way.
+ */
+ orig_l3 = pmap_load_clear(l3);
+ KASSERT(PTE_TO_PHYS(orig_l3) == opa,
+ ("pmap_enter: unexpected pa update for %#lx", va));
+ if ((orig_l3 & PTE_SW_MANAGED) != 0) {
+ om = PHYS_TO_VM_PAGE(opa);
+
+ /*
+ * The pmap lock is sufficient to synchronize with
+ * concurrent calls to pmap_page_test_mappings() and
+ * pmap_ts_referenced().
+ */
+ if (pmap_page_dirty(orig_l3))
+ vm_page_dirty(om);
+ if ((orig_l3 & PTE_A) != 0)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pv = pmap_pvh_remove(&om->md, pmap, va);
+ if ((new_l3 & PTE_SW_MANAGED) == 0)
+ free_pv_entry(pmap, pv);
+ if ((om->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ }
+ pmap_invalidate_page(pmap, va);
+ orig_l3 = 0;
} else {
/*
* Increment the counters.
@@ -2169,10 +2206,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Enter on the PV list if part of our managed memory.
*/
- if ((m->oflags & VPO_UNMANAGED) == 0) {
- new_l3 |= PTE_SW_MANAGED;
- pv = get_pv_entry(pmap, &lock);
- pv->pv_va = va;
+ if ((new_l3 & PTE_SW_MANAGED) != 0) {
+ if (pv == NULL) {
+ pv = get_pv_entry(pmap, &lock);
+ pv->pv_va = va;
+ }
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
@@ -2187,22 +2225,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
validate:
orig_l3 = pmap_load_store(l3, new_l3);
PTE_SYNC(l3);
- opa = PTE_TO_PHYS(orig_l3);
-
- if (opa != pa) {
- if ((orig_l3 & PTE_SW_MANAGED) != 0) {
- om = PHYS_TO_VM_PAGE(opa);
- if (pmap_page_dirty(orig_l3))
- vm_page_dirty(om);
- if ((orig_l3 & PTE_A) != 0)
- vm_page_aflag_set(om, PGA_REFERENCED);
- CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
- pmap_pvh_free(&om->md, pmap, va);
- }
- } else if (pmap_page_dirty(orig_l3)) {
- if ((orig_l3 & PTE_SW_MANAGED) != 0)
- vm_page_dirty(m);
- }
+ KASSERT(PTE_TO_PHYS(orig_l3) == pa,
+ ("pmap_enter: invalid update"));
+ if (pmap_page_dirty(orig_l3) &&
+ (orig_l3 & PTE_SW_MANAGED) != 0)
+ vm_page_dirty(m);
} else {
pmap_load_store(l3, new_l3);
PTE_SYNC(l3);