about summary refs log tree commit diff
path: root/libexec/rtld-elf/rtld_lock.c
diff options
context:
space:
mode:
Diffstat (limited to 'libexec/rtld-elf/rtld_lock.c')
-rw-r--r--  libexec/rtld-elf/rtld_lock.c  126
1 file changed, 69 insertions, 57 deletions
diff --git a/libexec/rtld-elf/rtld_lock.c b/libexec/rtld-elf/rtld_lock.c
index e501c03f0722..0c790450dcec 100644
--- a/libexec/rtld-elf/rtld_lock.c
+++ b/libexec/rtld-elf/rtld_lock.c
@@ -1,5 +1,5 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ * SPDX-License-Identifier: BSD-2-Clause
*
* Copyright 1999, 2000 John D. Polstra.
* All rights reserved.
@@ -25,7 +25,6 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
- * $FreeBSD$
*/
/*
@@ -89,49 +88,39 @@ static uint32_t fsigblock;
static void *
def_lock_create(void)
{
- void *base;
- char *p;
- uintptr_t r;
- Lock *l;
-
- /*
- * Arrange for the lock to occupy its own cache line. First, we
- * optimistically allocate just a cache line, hoping that malloc
- * will give us a well-aligned block of memory. If that doesn't
- * work, we allocate a larger block and take a well-aligned cache
- * line from it.
- */
- base = xmalloc(CACHE_LINE_SIZE);
- p = (char *)base;
- if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
- free(base);
- base = xmalloc(2 * CACHE_LINE_SIZE);
- p = (char *)base;
- if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
- p += CACHE_LINE_SIZE - r;
- }
- l = (Lock *)p;
- l->base = base;
- l->lock = 0;
- return l;
-}
-
-static void
-def_lock_destroy(void *lock)
-{
- Lock *l = (Lock *)lock;
+ void *base;
+ char *p;
+ uintptr_t r;
+ Lock *l;
- free(l->base);
+ /*
+ * Arrange for the lock to occupy its own cache line. First, we
+ * optimistically allocate just a cache line, hoping that malloc
+ * will give us a well-aligned block of memory. If that doesn't
+ * work, we allocate a larger block and take a well-aligned cache
+ * line from it.
+ */
+ base = xmalloc(CACHE_LINE_SIZE);
+ p = base;
+ if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
+ free(base);
+ base = xmalloc(2 * CACHE_LINE_SIZE);
+ p = base;
+ if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
+ p += CACHE_LINE_SIZE - r;
+ }
+ l = (Lock *)p;
+ l->base = base;
+ l->lock = 0;
+ return (l);
}
static void
-def_rlock_acquire(void *lock)
+def_lock_destroy(void *lock)
{
- Lock *l = (Lock *)lock;
+ Lock *l = lock;
- atomic_add_acq_int(&l->lock, RC_INCR);
- while (l->lock & WAFLAG)
- ; /* Spin */
+ free(l->base);
}
static void
@@ -145,24 +134,37 @@ sig_fastunblock(void)
__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}
+static bool
+def_lock_acquire_set(Lock *l, bool wlock)
+{
+ if (wlock) {
+ if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+ return (true);
+ } else {
+ atomic_add_acq_int(&l->lock, RC_INCR);
+ if ((l->lock & WAFLAG) == 0)
+ return (true);
+ atomic_add_int(&l->lock, -RC_INCR);
+ }
+ return (false);
+}
+
static void
-def_wlock_acquire(void *lock)
+def_lock_acquire(Lock *l, bool wlock)
{
- Lock *l;
sigset_t tmp_oldsigmask;
- l = (Lock *)lock;
if (ld_fast_sigblock) {
for (;;) {
atomic_add_32(&fsigblock, SIGFASTBLOCK_INC);
- if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+ if (def_lock_acquire_set(l, wlock))
break;
sig_fastunblock();
}
} else {
for (;;) {
sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
- if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+ if (def_lock_acquire_set(l, wlock))
break;
sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
}
@@ -172,26 +174,35 @@ def_wlock_acquire(void *lock)
}
static void
-def_lock_release(void *lock)
+def_rlock_acquire(void *lock)
{
- Lock *l;
+ def_lock_acquire(lock, false);
+}
- l = (Lock *)lock;
- if ((l->lock & WAFLAG) == 0)
- atomic_add_rel_int(&l->lock, -RC_INCR);
- else {
- atomic_add_rel_int(&l->lock, -WAFLAG);
- if (ld_fast_sigblock)
- sig_fastunblock();
- else if (atomic_fetchadd_int(&wnested, -1) == 1)
- sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
- }
+static void
+def_wlock_acquire(void *lock)
+{
+ def_lock_acquire(lock, true);
+}
+
+static void
+def_lock_release(void *lock)
+{
+ Lock *l = lock;
+
+ atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ?
+ RC_INCR : WAFLAG));
+ if (ld_fast_sigblock)
+ sig_fastunblock();
+ else if (atomic_fetchadd_int(&wnested, -1) == 1)
+ sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}
static int
def_thread_set_flag(int mask)
{
int old_val = thread_flag;
+
thread_flag |= mask;
return (old_val);
}
@@ -200,6 +211,7 @@ static int
def_thread_clr_flag(int mask)
{
int old_val = thread_flag;
+
thread_flag &= ~mask;
return (old_val);
}
@@ -213,7 +225,7 @@ static struct RtldLockInfo deflockinfo;
static __inline int
thread_mask_set(int mask)
{
- return lockinfo.thread_set_flag(mask);
+ return (lockinfo.thread_set_flag(mask));
}
static __inline void