-rw-r--r--  sys/riscv/riscv/pmap.c  |  71
1 file changed, 49 insertions, 22 deletions
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index ddae601aa070..270b8d56d354 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -2040,6 +2040,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
new_l3 |= (pn << PTE_PPN0_S);
if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= PTE_SW_WIRED;
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ new_l3 |= PTE_SW_MANAGED;
CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
@@ -2109,9 +2111,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
}
- om = NULL;
orig_l3 = pmap_load(l3);
opa = PTE_TO_PHYS(orig_l3);
+ pv = NULL;
/*
* Is the specified virtual address already mapped?
@@ -2148,7 +2150,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* No, might be a protection or wiring change.
*/
if ((orig_l3 & PTE_SW_MANAGED) != 0) {
- new_l3 |= PTE_SW_MANAGED;
if (pmap_is_write(new_l3))
vm_page_aflag_set(m, PGA_WRITEABLE);
}
@@ -2158,6 +2159,42 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/* Flush the cache, there might be uncommitted data in it */
if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
cpu_dcache_wb_range(va, L3_SIZE);
+
+ /*
+ * The physical page has changed. Temporarily invalidate
+ * the mapping. This ensures that all threads sharing the
+ * pmap keep a consistent view of the mapping, which is
+ * necessary for the correct handling of COW faults. It
+ * also permits reuse of the old mapping's PV entry,
+ * avoiding an allocation.
+ *
+ * For consistency, handle unmanaged mappings the same way.
+ */
+ orig_l3 = pmap_load_clear(l3);
+ KASSERT(PTE_TO_PHYS(orig_l3) == opa,
+ ("pmap_enter: unexpected pa update for %#lx", va));
+ if ((orig_l3 & PTE_SW_MANAGED) != 0) {
+ om = PHYS_TO_VM_PAGE(opa);
+
+ /*
+ * The pmap lock is sufficient to synchronize with
+ * concurrent calls to pmap_page_test_mappings() and
+ * pmap_ts_referenced().
+ */
+ if (pmap_page_dirty(orig_l3))
+ vm_page_dirty(om);
+ if ((orig_l3 & PTE_A) != 0)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pv = pmap_pvh_remove(&om->md, pmap, va);
+ if ((new_l3 & PTE_SW_MANAGED) == 0)
+ free_pv_entry(pmap, pv);
+ if ((om->aflags & PGA_WRITEABLE) != 0 &&
+ TAILQ_EMPTY(&om->md.pv_list))
+ vm_page_aflag_clear(om, PGA_WRITEABLE);
+ }
+ pmap_invalidate_page(pmap, va);
+ orig_l3 = 0;
} else {
/*
* Increment the counters.
@@ -2169,10 +2206,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Enter on the PV list if part of our managed memory.
*/
- if ((m->oflags & VPO_UNMANAGED) == 0) {
- new_l3 |= PTE_SW_MANAGED;
- pv = get_pv_entry(pmap, &lock);
- pv->pv_va = va;
+ if ((new_l3 & PTE_SW_MANAGED) != 0) {
+ if (pv == NULL) {
+ pv = get_pv_entry(pmap, &lock);
+ pv->pv_va = va;
+ }
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
@@ -2187,22 +2225,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
validate:
orig_l3 = pmap_load_store(l3, new_l3);
PTE_SYNC(l3);
- opa = PTE_TO_PHYS(orig_l3);
-
- if (opa != pa) {
- if ((orig_l3 & PTE_SW_MANAGED) != 0) {
- om = PHYS_TO_VM_PAGE(opa);
- if (pmap_page_dirty(orig_l3))
- vm_page_dirty(om);
- if ((orig_l3 & PTE_A) != 0)
- vm_page_aflag_set(om, PGA_REFERENCED);
- CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
- pmap_pvh_free(&om->md, pmap, va);
- }
- } else if (pmap_page_dirty(orig_l3)) {
- if ((orig_l3 & PTE_SW_MANAGED) != 0)
- vm_page_dirty(m);
- }
+ KASSERT(PTE_TO_PHYS(orig_l3) == pa,
+ ("pmap_enter: invalid update"));
+ if (pmap_page_dirty(orig_l3) &&
+ (orig_l3 & PTE_SW_MANAGED) != 0)
+ vm_page_dirty(m);
} else {
pmap_load_store(l3, new_l3);
PTE_SYNC(l3);
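
The core of the change is the "temporarily invalidate, then reinstall" sequence added in the second hunk above. The sketch below is a minimal, userspace-only model of that ordering, not kernel code: every name in it (model_replace, model_page, PTE_DIRTY, and so on) is invented for illustration and is not part of the FreeBSD pmap API. It only shows the sequence the diff relies on: atomically clear the PTE, harvest the old page's dirty/referenced state and its PV entry, then install the new mapping.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PTE_VALID	0x1u
#define	PTE_DIRTY	0x2u	/* stands in for PTE_D / pmap_page_dirty() */
#define	PTE_REF		0x4u	/* stands in for PTE_A */

struct model_page {
	bool	dirty;
	bool	referenced;
	int	pv_entries;	/* crude stand-in for the page's PV list */
};

/*
 * Replace the mapping in *pte with new_pte, folding the old mapping's
 * dirty/referenced state back into old_page, as the diff does for 'om'.
 */
static void
model_replace(_Atomic uint32_t *pte, uint32_t new_pte,
    struct model_page *old_page, struct model_page *new_page)
{
	uint32_t old;

	/*
	 * Step 1: atomically clear the PTE (cf. pmap_load_clear()).  A
	 * thread faulting on this address now sees an invalid mapping and
	 * must retry, instead of racing with the update and continuing to
	 * use the stale physical page.
	 */
	old = atomic_exchange(pte, 0);

	if ((old & PTE_DIRTY) != 0)
		old_page->dirty = true;
	if ((old & PTE_REF) != 0)
		old_page->referenced = true;

	/*
	 * Step 2: move the PV bookkeeping from the old page to the new
	 * one; in the real code the pv_entry removed from 'om' is reused
	 * for 'm', so no allocation is needed.
	 */
	old_page->pv_entries--;
	new_page->pv_entries++;

	/* A TLB shootdown (pmap_invalidate_page()) would happen here. */

	/* Step 3: install the new mapping. */
	atomic_store(pte, new_pte | PTE_VALID);
}

int
main(void)
{
	_Atomic uint32_t pte = 0x1000u | PTE_VALID | PTE_DIRTY;
	struct model_page oldp = { .dirty = false, .pv_entries = 1 };
	struct model_page newp = { .pv_entries = 0 };

	model_replace(&pte, 0x2000u, &oldp, &newp);
	printf("old page: dirty=%d pv=%d; new page: pv=%d; pte=%#x\n",
	    oldp.dirty, oldp.pv_entries, newp.pv_entries,
	    (unsigned int)atomic_load(&pte));
	return (0);
}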