aboutsummaryrefslogtreecommitdiff
path: root/sys/mips/mips/pmap.c
diff options
context:
space:
mode:
authorMark Johnston <markj@FreeBSD.org>2020-04-24 21:21:49 +0000
committerMark Johnston <markj@FreeBSD.org>2020-04-24 21:21:49 +0000
commit06459adc89b4c83d7e8aa5759897ef66d877bee9 (patch)
tree2607a26b647703b714bf72e308e380c605057377 /sys/mips/mips/pmap.c
parent117d10f3c19e32d90ea6c0e4128a38f6869e7572 (diff)
downloadsrc-06459adc89b4c83d7e8aa5759897ef66d877bee9.tar.gz
src-06459adc89b4c83d7e8aa5759897ef66d877bee9.zip
Fix a race in pmap_emulate_modified().
pmap_emulate_modified() was assuming that no changes to the pmap could take place between the TLB signaling the fault and pmap_emulate_modified()'s acquisition of the pmap lock, but that's clearly not even true in the uniprocessor case, never mind the SMP case. Submitted by: Nathaniel Filardo <nwf20@cl.cam.ac.uk> Reviewed by: kib MFC after: 2 weeks Differential Revision: https://reviews.freebsd.org/D24523
Notes
Notes: svn path=/head/; revision=360281
Diffstat (limited to 'sys/mips/mips/pmap.c')
-rw-r--r--sys/mips/mips/pmap.c65
1 file changed, 54 insertions, 11 deletions
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 7dcb8bf6e107..06cd5028a640 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -3502,28 +3502,71 @@ pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, va);
- if (pte == NULL)
- panic("pmap_emulate_modified: can't find PTE");
-#ifdef SMP
- /* It is possible that some other CPU changed m-bit */
- if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
+
+ /*
+ * It is possible that some other CPU or thread changed the pmap while
+ * we weren't looking; in the SMP case, this is readily apparent, but
+ * it can even happen in the UP case, because we may have been blocked
+ * on PMAP_LOCK(pmap) above while someone changed this out from
+ * underneath us.
+ */
+
+ if (pte == NULL) {
+ /*
+ * This PTE's PTP (or one of its ancestors) has been reclaimed;
+ * trigger a full fault to reconstruct it via pmap_enter.
+ */
+ PMAP_UNLOCK(pmap);
+ return (1);
+ }
+
+ if (!pte_test(pte, PTE_V)) {
+ /*
+ * This PTE is no longer valid; the other thread or other
+ * processor must have arranged for our TLB to no longer
+ * have this entry, possibly by IPI, so no tlb_update is
+ * required. Fall out of the fast path and go take a
+ * general fault before retrying the instruction (or taking
+ * a signal).
+ */
+ PMAP_UNLOCK(pmap);
+ return (1);
+ }
+
+ if (pte_test(pte, PTE_D)) {
+ /*
+ * This PTE is valid and has the PTE_D bit asserted; since
+ * this is an increase in permission, we may have been expected
+ * to update the TLB lazily. Do so here and return, on the
+ * fast path, to retry the instruction.
+ */
tlb_update(pmap, va, *pte);
PMAP_UNLOCK(pmap);
return (0);
}
-#else
- if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
- panic("pmap_emulate_modified: invalid pte");
-#endif
+
if (pte_test(pte, PTE_RO)) {
+ /*
+ * This PTE is valid, not dirty, and read-only. Go take a
+ * full fault (most likely to upgrade this part of the address
+ * space to writeable).
+ */
PMAP_UNLOCK(pmap);
return (1);
}
- pte_set(pte, PTE_D);
- tlb_update(pmap, va, *pte);
+
if (!pte_test(pte, PTE_MANAGED))
panic("pmap_emulate_modified: unmanaged page");
+
+ /*
+ * PTE is valid, managed, not dirty, and not read-only. Set PTE_D
+ * and eagerly update the local TLB, returning on the fast path.
+ */
+
+ pte_set(pte, PTE_D);
+ tlb_update(pmap, va, *pte);
PMAP_UNLOCK(pmap);
+
return (0);
}