about summary refs log tree commit diff
diff options
context:
space:
mode:
authorGreg Becker <becker.greg@att.net>2023-06-27 01:08:29 +0000
committerMateusz Guzik <mjg@FreeBSD.org>2023-06-27 11:56:44 +0000
commita6c0d801ca5934bb9b9cca6870ea7406d5db0641 (patch)
tree52d76cca3edf28b3e56709c7407744b4288da2bb
parent4a402dfe0bc44770c9eac6e58a501e4805e29413 (diff)
downloadsrc-a6c0d801ca5934bb9b9cca6870ea7406d5db0641.tar.gz
src-a6c0d801ca5934bb9b9cca6870ea7406d5db0641.zip
libpthread: allocate rwlocks and spinlocks in dedicated cachelines
Reduces severe performance degradation due to false-sharing. Note that this
does not account for hardware which can perform adjacent cacheline prefetch.

[mjg: massaged the commit message and the patch to use aligned_alloc
instead of malloc]

PR:		272238
MFC after:	1 week
-rw-r--r--lib/libthr/thread/thr_pspinlock.c3
-rw-r--r--lib/libthr/thread/thr_rwlock.c5
2 files changed, 6 insertions, 2 deletions
diff --git a/lib/libthr/thread/thr_pspinlock.c b/lib/libthr/thread/thr_pspinlock.c
index 089416a18a07..374fe498034b 100644
--- a/lib/libthr/thread/thr_pspinlock.c
+++ b/lib/libthr/thread/thr_pspinlock.c
@@ -60,7 +60,8 @@ _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
if (lock == NULL)
return (EINVAL);
if (pshared == PTHREAD_PROCESS_PRIVATE) {
- lck = malloc(sizeof(struct pthread_spinlock));
+ lck = aligned_alloc(CACHE_LINE_SIZE,
+ roundup(sizeof(struct pthread_spinlock), CACHE_LINE_SIZE));
if (lck == NULL)
return (ENOMEM);
*lock = lck;
diff --git a/lib/libthr/thread/thr_rwlock.c b/lib/libthr/thread/thr_rwlock.c
index b7526376a9a3..3e8af974fa18 100644
--- a/lib/libthr/thread/thr_rwlock.c
+++ b/lib/libthr/thread/thr_rwlock.c
@@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
+#include <string.h>
#include "namespace.h"
#include <pthread.h>
@@ -102,9 +103,11 @@ rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
if (attr == NULL || *attr == NULL ||
(*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
- prwlock = calloc(1, sizeof(struct pthread_rwlock));
+ prwlock = aligned_alloc(CACHE_LINE_SIZE,
+ roundup(sizeof(struct pthread_rwlock), CACHE_LINE_SIZE));
if (prwlock == NULL)
return (ENOMEM);
+ memset(prwlock, 0, sizeof(struct pthread_rwlock));
*rwlock = prwlock;
} else {
prwlock = __thr_pshared_offpage(rwlock, 1);