author	Konstantin Belousov <kib@FreeBSD.org>	2011-09-28 14:57:50 +0000
committer	Konstantin Belousov <kib@FreeBSD.org>	2011-09-28 14:57:50 +0000
commit	abb9b935ca33fe016fa96582e165e67ed5f4dfd8 (patch)
tree	6a5296fc970ddac679f37c4f859ef750f1173a7e /sys/vm/vm_page.c
parent	7778ab7e0cc22f0824eb1d1047a7ef8b4785267a (diff)
Use the trick of performing the atomic operation on the contained aligned
word to handle the dirty mask updates in vm_page_clear_dirty_mask().

Remove the vm page queue lock around the vm_page_dirty() call in
vm_fault_hold(), the sole purpose of which was to protect the dirty field
on architectures which do not provide short or byte-wide atomics.

Reviewed by:	alc, attilio
Tested by:	flo (sparc64)
MFC after:	2 weeks
Notes:
	svn path=/head/; revision=225840
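The trick named in the commit message can be illustrated outside the kernel.
Below is a minimal standalone sketch, not FreeBSD code: it clears bits in a
16-bit dirty field using only a 32-bit atomic, by rounding the field's
address down to its containing aligned word and shifting the mask into
place. It assumes GCC/Clang (the __atomic builtins and the __BYTE_ORDER__
macro) in place of FreeBSD's atomic_clear_32() and <machine/atomic.h>, and
the struct page here is an illustrative stand-in, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define NBBY 8				/* bits per byte, as in <sys/param.h> */

struct page {
	uint16_t valid;
	uint16_t dirty;			/* no 16-bit atomics needed below */
};

/*
 * Atomically clear 'bits' in p->dirty with a 32-bit atomic on the
 * containing aligned word.  Mirrors the PAGE_SIZE <= 8192 path of
 * vm_page_clear_dirty_mask(); __atomic_fetch_and() stands in for
 * atomic_clear_32().
 */
static void
clear_dirty_bits(struct page *p, uint16_t bits)
{
	uintptr_t addr;
	int shift;

	addr = (uintptr_t)&p->dirty;
	shift = addr & (sizeof(uint32_t) - 1);	/* byte offset in the word */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* On big-endian, byte 0 is the most significant byte of the word. */
	shift = (sizeof(uint32_t) - sizeof(p->dirty) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(uintptr_t)(sizeof(uint32_t) - 1);	/* round down to word */
	__atomic_fetch_and((uint32_t *)addr, ~((uint32_t)bits << shift),
	    __ATOMIC_SEQ_CST);
}

int
main(void)
{
	/* Align the struct so the containing word stays inside the object. */
	static _Alignas(uint32_t) struct page p = { 0xffff, 0x00f3 };

	clear_dirty_bits(&p, 0x0003);
	printf("dirty = %#x\n", p.dirty);	/* prints dirty = 0xf0 */
	return (0);
}

Compiled with cc -std=c11, this prints dirty = 0xf0 on either byte order;
only the position of the field's bits inside the 32-bit word differs.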
Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--	sys/vm/vm_page.c	61
1 file changed, 35 insertions, 26 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 341c2383abe3..fe829a3d68ee 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -745,9 +745,9 @@ vm_page_sleep(vm_page_t m, const char *msg)
*
* Set all bits in the page's dirty field.
*
- * The object containing the specified page must be locked if the call is
- * made from the machine-independent layer. If, however, the call is
- * made from the pmap layer, then the page queues lock may be required.
+ * The object containing the specified page must be locked if the
+ * call is made from the machine-independent layer.
+ *
* See vm_page_clear_dirty_mask().
*/
void
@@ -2339,44 +2339,53 @@ vm_page_set_valid(vm_page_t m, int base, int size)
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
{
+ uintptr_t addr;
+#if PAGE_SIZE < 16384
+ int shift;
+#endif
/*
* If the object is locked and the page is neither VPO_BUSY nor
* PGA_WRITEABLE, then the page's dirty field cannot possibly be
	 * set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
m->dirty &= ~pagebits;
else {
-#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
/*
- * On the aforementioned architectures, the page queues lock
- * is not required by the following read-modify-write
- * operation. The combination of the object's lock and an
- * atomic operation suffice. Moreover, the pmap layer on
- * these architectures can call vm_page_dirty() without
- * holding the page queues lock.
+ * The pmap layer can call vm_page_dirty() without
+ * holding a distinguished lock. The combination of
+ * the object's lock and an atomic operation suffice
+ * to guarantee consistency of the page dirty field.
+ *
+ * For PAGE_SIZE == 32768 case, compiler already
+ * properly aligns the dirty field, so no forcible
+ * alignment is needed. Only require existence of
+	 * atomic_clear_64 when page size is 32768.
*/
-#if PAGE_SIZE == 4096
- atomic_clear_char(&m->dirty, pagebits);
-#elif PAGE_SIZE == 8192
- atomic_clear_short(&m->dirty, pagebits);
+ addr = (uintptr_t)&m->dirty;
+#if PAGE_SIZE == 32768
+ atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
- atomic_clear_int(&m->dirty, pagebits);
-#else
-#error "PAGE_SIZE is not supported."
-#endif
-#else
+ atomic_clear_32((uint32_t *)addr, pagebits);
+#else /* PAGE_SIZE <= 8192 */
/*
- * Otherwise, the page queues lock is required to ensure that
- * a concurrent pmap operation does not set the page's dirty
- * field during the following read-modify-write operation.
+	 * Use a trick to perform a 32-bit atomic on the
+	 * containing aligned word, to not depend on the
+	 * existence of atomic_clear_{8, 16}.
*/
- vm_page_lock_queues();
- m->dirty &= ~pagebits;
- vm_page_unlock_queues();
+ shift = addr & (sizeof(uint32_t) - 1);
+#if BYTE_ORDER == BIG_ENDIAN
+ shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
+#else
+ shift *= NBBY;
#endif
+ addr &= ~(sizeof(uint32_t) - 1);
+ atomic_clear_32((uint32_t *)addr, pagebits << shift);
+#endif /* PAGE_SIZE */
}
}
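For reference, the shift arithmetic in the new PAGE_SIZE <= 8192 path can
be tabulated in isolation. The standalone sketch below (illustrative, not
FreeBSD code) prints, for a 2-byte dirty field at each possible byte offset
within its aligned 32-bit word, the bit position the patch shifts the mask
to on each byte order.

#include <stdint.h>
#include <stdio.h>

#define NBBY 8

int
main(void)
{
	/* PAGE_SIZE == 8192 case: m->dirty is 16 bits, so offsets 0 and 2. */
	size_t dirty_size = sizeof(uint16_t);

	for (size_t offset = 0; offset < sizeof(uint32_t);
	    offset += dirty_size) {
		size_t le = offset * NBBY;
		size_t be = (sizeof(uint32_t) - dirty_size - offset) * NBBY;

		printf("byte offset %zu: shift %zu (little-endian), "
		    "%zu (big-endian)\n", offset, le, be);
	}
	return (0);
}

This prints shifts of 0 and 16 at offset 0, and 16 and 0 at offset 2: on
little-endian, byte offset k maps to bit offset k * NBBY, while on
big-endian the word's first byte is its most significant, so the field sits
at the mirrored position. That asymmetry is exactly what the BYTE_ORDER
conditional in the patch accounts for.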