author     Max Laier <mlaier@FreeBSD.org>    2021-02-24 23:56:16 +0000
committer  Ryan Libby <rlibby@FreeBSD.org>   2021-02-24 23:56:16 +0000
commit     14b5a3c7d5c034c2a5a487b5e2d0de79c2801a65 (patch)
tree       e87e97d334f51a2fa5c6a6b219363a122b4bb350
parent     9a227a2fd642ec057a0ec70d67d5699d65553294 (diff)
vm pqbatch: move unmanaged page assert under pagequeue lock
This KASSERT is overzealous because of the following race condition:

1) A managed page which is currently in PQ_LAUNDRY is freed.
   vm_page_free_prep calls vm_page_dequeue_deferred().
   The page state is: PQ_LAUNDRY, PGA_DEQUEUE|PGA_ENQUEUED
2) The laundry worker comes around, picks up the page, and calls
   vm_pageout_defer(m, PQ_LAUNDRY, true) to check whether the page is
   still in the queue.  It does a vm_page_astate_load and, as per above,
   gets PQ_LAUNDRY, PGA_DEQUEUE|PGA_ENQUEUED.
3) The laundry worker is preempted and another thread allocates our page
   from the free pool.  For example, vm_page_alloc_domain_after calls
   vm_page_dequeue() and sets VPO_UNMANAGED because we are allocating for
   an OBJT_UNMANAGED object.
   The page state is: PQ_NONE, 0 - VPO_UNMANAGED
4) The laundry worker resumes and processes vm_pageout_defer based on the
   stale astate, which leads to a call to vm_page_pqbatch_submit, which
   will trip on the KASSERT.

Submitted by:	mlaier
Reviewed by:	markj, rlibby
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D28563
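To make the interleaving easier to follow, here is a small self-contained C
sketch that models the stale-snapshot problem in userspace.  It is not kernel
code: the struct, the helper names (page_model, submit_old, process_new), and
the printed messages are invented for illustration, and C11 atomics stand in
for the page's astate and the page queue lock.

/*
 * Minimal userspace model of the race above -- NOT kernel code.  In the
 * kernel the snapshot is a vm_page_astate_t, the "old" check lived in
 * vm_page_pqbatch_submit(), and the relocated check runs in
 * vm_pqbatch_process_page() with the page queue lock held.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { PQ_NONE, PQ_LAUNDRY };

struct page_model {
	_Atomic int queue;	/* stands in for m->a.queue */
	int unmanaged;		/* stands in for m->oflags & VPO_UNMANAGED */
};

/* Old placement: assert on "unmanaged" before re-checking queue state. */
static void
submit_old(struct page_model *m)
{
	if (m->unmanaged)
		printf("old KASSERT would fire: page is unmanaged\n");
}

/* New placement: re-read the state first; stale callers become no-ops. */
static void
process_new(struct page_model *m, int expected_queue)
{
	if (atomic_load(&m->queue) != expected_queue) {
		printf("no-op: page already left the queue\n");
		return;
	}
	if (m->unmanaged)
		printf("new KASSERT would fire (not reachable for enqueued pages)\n");
}

int
main(void)
{
	struct page_model m = { .queue = PQ_LAUNDRY, .unmanaged = 0 };

	/* Step 2: the laundry worker snapshots the page state. */
	int stale = atomic_load(&m.queue);

	/* Step 3: another thread reallocates the page as unmanaged. */
	atomic_store(&m.queue, PQ_NONE);
	m.unmanaged = 1;

	/* Step 4: the worker acts on its stale snapshot. */
	if (stale == PQ_LAUNDRY) {
		submit_old(&m);			/* panics in the old code */
		process_new(&m, PQ_LAUNDRY);	/* harmless no-op now */
	}
	return (0);
}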
-rw-r--r--	sys/vm/vm_page.c	7
1 files changed, 2 insertions, 5 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c36b8cdc5762..20fbbc304490 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3545,9 +3545,8 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
 			counter_u64_add(queue_nops, 1);
 			break;
 		}
-		KASSERT(old.queue != PQ_NONE ||
-		    (old.flags & PGA_QUEUE_STATE_MASK) == 0,
-		    ("%s: page %p has unexpected queue state", __func__, m));
+		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+		    ("%s: page %p is unmanaged", __func__, m));
 
 		new = old;
 		if ((old.flags & PGA_DEQUEUE) != 0) {
@@ -3594,8 +3593,6 @@ vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 	struct vm_pagequeue *pq;
 	int domain;
 
-	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-	    ("page %p is unmanaged", m));
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
 	domain = vm_page_domain(m);