Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_extern.h |  3
-rw-r--r--  sys/vm/vm_fault.c  | 20
-rw-r--r--  sys/vm/vm_page.c   | 32
3 files changed, 48 insertions, 7 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 1fd6518cf4ed..d0e005088745 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -164,5 +164,8 @@ vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
 	return (vm_addr_align_ok(pa, alignment) &&
 	    vm_addr_bound_ok(pa, size, boundary));
 }
+
+extern bool vm_check_pg_zero;
+
 #endif /* _KERNEL */
 #endif /* !_VM_EXTERN_H_ */
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 2e150b368d71..3bf16778d987 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -85,6 +85,8 @@
 #include <sys/refcount.h>
 #include <sys/resourcevar.h>
 #include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/sf_buf.h>
 #include <sys/signalvar.h>
 #include <sys/sysctl.h>
 #include <sys/sysent.h>
@@ -1220,6 +1222,24 @@ vm_fault_zerofill(struct faultstate *fs)
 	if ((fs->m->flags & PG_ZERO) == 0) {
 		pmap_zero_page(fs->m);
 	} else {
+#ifdef INVARIANTS
+		if (vm_check_pg_zero) {
+			struct sf_buf *sf;
+			unsigned long *p;
+			int i;
+
+			sched_pin();
+			sf = sf_buf_alloc(fs->m, SFB_CPUPRIVATE);
+			p = (unsigned long *)sf_buf_kva(sf);
+			for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++) {
+				KASSERT(*p == 0,
+				    ("zerocheck failed page %p PG_ZERO %d %jx",
+				    fs->m, i, (uintmax_t)*p));
+			}
+			sf_buf_free(sf);
+			sched_unpin();
+		}
+#endif
 		VM_CNT_INC(v_ozfod);
 	}
 	VM_CNT_INC(v_zfod);
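
Taken together, the patch replaces the old DIAGNOSTIC/PHYS_TO_DMAP zero check in vm_page_free_prep() with an INVARIANTS check gated by the new debug.vm_check_pg_zero knob, and adds the same check to vm_fault_zerofill() above. Both sites open-code one pattern: pin the thread, map the page through a CPU-private sf_buf(9), scan it word by word, and tear the mapping down. A minimal standalone sketch of that pattern follows; the helper name is hypothetical (the patch does not introduce such a function) and an INVARIANTS kernel is assumed:

#ifdef INVARIANTS
/* Hypothetical helper illustrating the pattern; not part of the patch. */
static void
vm_page_assert_zeroed(vm_page_t m)
{
	struct sf_buf *sf;
	unsigned long *p;
	int i;

	/* Pin to the current CPU so a CPU-private sf_buf mapping stays usable. */
	sched_pin();
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	p = (unsigned long *)sf_buf_kva(sf);
	for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++)
		KASSERT(*p == 0,
		    ("zerocheck failed page %p PG_ZERO %d %jx",
		    m, i, (uintmax_t)*p));
	sf_buf_free(sf);
	sched_unpin();
}
#endif

Using sf_buf(9) rather than PHYS_TO_DMAP() presumably lets the check run on platforms without a direct map. The two call sites differ in one respect: the vm_fault_zerofill() hunk calls sf_buf_alloc() without SFB_NOWAIT and may therefore wait for a mapping, while vm_page_free_prep() below passes SFB_NOWAIT and silently skips the check when no sf_buf is available.
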
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 16878604fa11..b39d665f9e0f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -84,6 +84,7 @@
 #include <sys/sleepqueue.h>
 #include <sys/sbuf.h>
 #include <sys/sched.h>
+#include <sys/sf_buf.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmmeter.h>
@@ -145,6 +146,13 @@ SYSCTL_ULONG(_vm_stats_page, OID_AUTO, nofreeq_size, CTLFLAG_RD,
     &nofreeq_size, 0,
     "Size of the nofree queue");
+#ifdef INVARIANTS
+bool vm_check_pg_zero = false;
+SYSCTL_BOOL(_debug, OID_AUTO, vm_check_pg_zero, CTLFLAG_RWTUN,
+    &vm_check_pg_zero, 0,
+    "verify content of freed zero-filled pages");
+#endif
+
 /*
  * bogus page -- for I/O to/from partially complete buffers,
  * or for paging into sparsely invalid regions.
@@ -4050,14 +4058,24 @@ vm_page_free_prep(vm_page_t m)
 	 */
 	atomic_thread_fence_acq();
-#if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
-	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
-		uint64_t *p;
+#ifdef INVARIANTS
+	if (vm_check_pg_zero && (m->flags & PG_ZERO) != 0) {
+		struct sf_buf *sf;
+		unsigned long *p;
 		int i;
-		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
-			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
-			    m, i, (uintmax_t)*p));
+
+		sched_pin();
+		sf = sf_buf_alloc(m, SFB_CPUPRIVATE | SFB_NOWAIT);
+		if (sf != NULL) {
+			p = (unsigned long *)sf_buf_kva(sf);
+			for (i = 0; i < PAGE_SIZE / sizeof(*p); i++, p++) {
+				KASSERT(*p == 0,
+				    ("zerocheck failed page %p PG_ZERO %d %jx",
+				    m, i, (uintmax_t)*p));
+			}
+			sf_buf_free(sf);
+		}
+		sched_unpin();
 	}
 #endif
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
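
Because the sysctl is declared with CTLFLAG_RWTUN, it can be flipped at runtime with sysctl debug.vm_check_pg_zero=1 or preset as a loader tunable of the same name in loader.conf(5); either way the check is only compiled in on kernels built with INVARIANTS, and it defaults to off.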