aboutsummaryrefslogtreecommitdiff
path: root/lib/libthr
diff options
context:
space:
mode:
authorDavid Xu <davidxu@FreeBSD.org>2008-05-29 07:57:33 +0000
committerDavid Xu <davidxu@FreeBSD.org>2008-05-29 07:57:33 +0000
commit850f4d66cb1cde64fbe1ee9f1719bc23585e907e (patch)
treea58be2d79c9e47b30162c6c3b4617b4bf05fdecd /lib/libthr
parent9dea35a10a4bba6e01f6a2916d56645150a041cf (diff)
downloadsrc-850f4d66cb1cde64fbe1ee9f1719bc23585e907e.tar.gz
src-850f4d66cb1cde64fbe1ee9f1719bc23585e907e.zip
- Reduce function call overhead for uncontended case.
- Remove unused flags MUTEX_FLAGS_* and their code.
- Check validity of the timeout parameter in mutex_self_lock().
Notes
Notes: svn path=/head/; revision=179411
Diffstat (limited to 'lib/libthr')
-rw-r--r--lib/libthr/thread/thr_init.c3
-rw-r--r--lib/libthr/thread/thr_mutex.c308
-rw-r--r--lib/libthr/thread/thr_private.h10
3 files changed, 104 insertions, 217 deletions
diff --git a/lib/libthr/thread/thr_init.c b/lib/libthr/thread/thr_init.c
index edcef80a850f..f96bba9c617a 100644
--- a/lib/libthr/thread/thr_init.c
+++ b/lib/libthr/thread/thr_init.c
@@ -89,8 +89,7 @@ struct pthread_attr _pthread_attr_default = {
struct pthread_mutex_attr _pthread_mutexattr_default = {
.m_type = PTHREAD_MUTEX_DEFAULT,
.m_protocol = PTHREAD_PRIO_NONE,
- .m_ceiling = 0,
- .m_flags = 0
+ .m_ceiling = 0
};
/* Default condition variable attributes: */
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index 0f593f0b733c..62d7ac68a56a 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -51,12 +51,12 @@
(m)->m_qe.tqe_next = NULL; \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m) do { \
- if ((m)->m_qe.tqe_prev == NULL) \
+ if (__predict_false((m)->m_qe.tqe_prev == NULL))\
PANIC("mutex is not on list"); \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m) do { \
- if (((m)->m_qe.tqe_prev != NULL) || \
- ((m)->m_qe.tqe_next != NULL)) \
+ if (__predict_false((m)->m_qe.tqe_prev != NULL || \
+ (m)->m_qe.tqe_next != NULL)) \
PANIC("mutex is on list"); \
} while (0)
#else
@@ -95,9 +95,13 @@ static int mutex_self_lock(pthread_mutex_t,
static int mutex_unlock_common(pthread_mutex_t *);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
+__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
+__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
+__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
+__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
@@ -108,15 +112,17 @@ __weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);
__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
+__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);
__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
+__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);
static int
mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *mutex_attr, int private,
+ const pthread_mutexattr_t *mutex_attr,
void *(calloc_cb)(size_t, size_t))
{
const struct pthread_mutex_attr *attr;
@@ -139,9 +145,6 @@ mutex_init(pthread_mutex_t *mutex,
pmutex->m_type = attr->m_type;
pmutex->m_owner = NULL;
- pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
- if (private)
- pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
pmutex->m_count = 0;
pmutex->m_refcount = 0;
pmutex->m_spinloops = 0;
@@ -180,24 +183,7 @@ init_static(struct pthread *thread, pthread_mutex_t *mutex)
THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
if (*mutex == NULL)
- ret = mutex_init(mutex, NULL, 0, calloc);
- else
- ret = 0;
-
- THR_LOCK_RELEASE(thread, &_mutex_static_lock);
-
- return (ret);
-}
-
-static int
-init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
-{
- int ret;
-
- THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
-
- if (*mutex == NULL)
- ret = mutex_init(mutex, NULL, 1, calloc);
+ ret = mutex_init(mutex, NULL, calloc);
else
ret = 0;
@@ -219,17 +205,10 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
}
int
-_pthread_mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *mutex_attr)
-{
- return mutex_init(mutex, mutex_attr, 1, calloc);
-}
-
-int
__pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr)
{
- return mutex_init(mutex, mutex_attr, 0, calloc);
+ return mutex_init(mutex, mutex_attr, calloc);
}
/* This function is used internally by malloc. */
@@ -240,12 +219,11 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
static const struct pthread_mutex_attr attr = {
.m_type = PTHREAD_MUTEX_NORMAL,
.m_protocol = PTHREAD_PRIO_NONE,
- .m_ceiling = 0,
- .m_flags = 0
+ .m_ceiling = 0
};
static const struct pthread_mutex_attr *pattr = &attr;
- return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, 0, calloc_cb);
+ return mutex_init(mutex, (pthread_mutexattr_t *)&pattr, calloc_cb);
}
void
@@ -319,7 +297,6 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
return (ret);
}
-
#define ENQUEUE_MUTEX(curthread, m) \
do { \
(m)->m_owner = curthread; \
@@ -368,124 +345,95 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
return (mutex_trylock_common(curthread, mutex));
}
-int
-_pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- struct pthread *curthread = _get_curthread();
- int ret;
-
- /*
- * If the mutex is statically initialized, perform the dynamic
- * initialization marking the mutex private (delete safe):
- */
- if (__predict_false(*mutex == NULL)) {
- ret = init_static_private(curthread, mutex);
- if (__predict_false(ret))
- return (ret);
- }
- return (mutex_trylock_common(curthread, mutex));
-}
-
static int
-mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
+mutex_lock_sleep(struct pthread *curthread, pthread_mutex_t m,
const struct timespec * abstime)
{
struct timespec ts, ts2;
- struct pthread_mutex *m;
uint32_t id;
int ret;
int count;
id = TID(curthread);
- m = *mutex;
- ret = _thr_umutex_trylock2(&m->m_lock, id);
- if (ret == 0) {
- ENQUEUE_MUTEX(curthread, m);
- } else if (m->m_owner == curthread) {
- ret = mutex_self_lock(m, abstime);
- } else {
- /*
- * For adaptive mutexes, spin for a bit in the expectation
- * that if the application requests this mutex type then
- * the lock is likely to be released quickly and it is
- * faster than entering the kernel
- */
- if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
- goto sleep_in_kernel;
-
- if (!_thr_is_smp)
- goto yield_loop;
-
- count = m->m_spinloops;
- while (count--) {
- if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
- ret = _thr_umutex_trylock2(&m->m_lock, id);
- if (ret == 0)
- goto done;
- }
- CPU_SPINWAIT;
- }
+ if (__predict_false(m->m_owner == curthread))
+ return mutex_self_lock(m, abstime);
-yield_loop:
- count = m->m_yieldloops;
- while (count--) {
- _sched_yield();
+ /*
+ * For adaptive mutexes, spin for a bit in the expectation
+ * that if the application requests this mutex type then
+ * the lock is likely to be released quickly and it is
+ * faster than entering the kernel
+ */
+ if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
+ goto sleep_in_kernel;
+
+ if (!_thr_is_smp)
+ goto yield_loop;
+
+ count = m->m_spinloops;
+ while (count--) {
+ if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
ret = _thr_umutex_trylock2(&m->m_lock, id);
if (ret == 0)
goto done;
}
+ CPU_SPINWAIT;
+ }
-sleep_in_kernel:
- if (abstime == NULL) {
- ret = __thr_umutex_lock(&m->m_lock);
- } else if (__predict_false(
- abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= 1000000000)) {
- ret = EINVAL;
- } else {
- clock_gettime(CLOCK_REALTIME, &ts);
- TIMESPEC_SUB(&ts2, abstime, &ts);
- ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
- /*
- * Timed out wait is not restarted if
- * it was interrupted, not worth to do it.
- */
- if (ret == EINTR)
- ret = ETIMEDOUT;
- }
-done:
+yield_loop:
+ count = m->m_yieldloops;
+ while (count--) {
+ _sched_yield();
+ ret = _thr_umutex_trylock2(&m->m_lock, id);
if (ret == 0)
- ENQUEUE_MUTEX(curthread, m);
+ goto done;
+ }
+
+sleep_in_kernel:
+ if (abstime == NULL) {
+ ret = __thr_umutex_lock(&m->m_lock);
+ } else if (__predict_false(
+ abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)) {
+ ret = EINVAL;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts);
+ TIMESPEC_SUB(&ts2, abstime, &ts);
+ ret = __thr_umutex_timedlock(&m->m_lock, &ts2);
+ /*
+ * Timed out wait is not restarted if
+ * it was interrupted, not worth to do it.
+ */
+ if (ret == EINTR)
+ ret = ETIMEDOUT;
}
+done:
+ if (ret == 0)
+ ENQUEUE_MUTEX(curthread, m);
return (ret);
}
-int
-__pthread_mutex_lock(pthread_mutex_t *m)
+static inline int
+mutex_lock_common(struct pthread *curthread, struct pthread_mutex *m,
+ const struct timespec * abstime)
{
- struct pthread *curthread;
+ uint32_t id;
int ret;
- _thr_check_init();
-
- curthread = _get_curthread();
-
- /*
- * If the mutex is statically initialized, perform the dynamic
- * initialization:
- */
- if (__predict_false(*m == NULL)) {
- ret = init_static(curthread, m);
- if (__predict_false(ret))
- return (ret);
- }
- return (mutex_lock_common(curthread, m, NULL));
+ id = TID(curthread);
+ ret = _thr_umutex_trylock2(&m->m_lock, id);
+ if (ret == 0)
+ ENQUEUE_MUTEX(curthread, m);
+ else
+ ret = mutex_lock_sleep(curthread, m, abstime);
+ return (ret);
}
int
-_pthread_mutex_lock(pthread_mutex_t *m)
+__pthread_mutex_lock(pthread_mutex_t *mutex)
{
struct pthread *curthread;
+ struct pthread_mutex *m;
int ret;
_thr_check_init();
@@ -494,20 +442,22 @@ _pthread_mutex_lock(pthread_mutex_t *m)
/*
* If the mutex is statically initialized, perform the dynamic
- * initialization marking it private (delete safe):
+ * initialization:
*/
- if (__predict_false(*m == NULL)) {
- ret = init_static_private(curthread, m);
+ if (__predict_false((m = *mutex) == NULL)) {
+ ret = init_static(curthread, mutex);
if (__predict_false(ret))
return (ret);
+ m = *mutex;
}
return (mutex_lock_common(curthread, m, NULL));
}
int
-__pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
+__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
struct pthread *curthread;
+ struct pthread_mutex *m;
int ret;
_thr_check_init();
@@ -518,32 +468,11 @@ __pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
* If the mutex is statically initialized, perform the dynamic
* initialization:
*/
- if (__predict_false(*m == NULL)) {
- ret = init_static(curthread, m);
- if (__predict_false(ret))
- return (ret);
- }
- return (mutex_lock_common(curthread, m, abstime));
-}
-
-int
-_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *abstime)
-{
- struct pthread *curthread;
- int ret;
-
- _thr_check_init();
-
- curthread = _get_curthread();
-
- /*
- * If the mutex is statically initialized, perform the dynamic
- * initialization marking it private (delete safe):
- */
- if (__predict_false(*m == NULL)) {
- ret = init_static_private(curthread, m);
+ if (__predict_false((m = *mutex) == NULL)) {
+ ret = init_static(curthread, mutex);
if (__predict_false(ret))
return (ret);
+ m = *mutex;
}
return (mutex_lock_common(curthread, m, abstime));
}
@@ -559,7 +488,7 @@ _mutex_cv_lock(pthread_mutex_t *m, int count)
{
int ret;
- ret = mutex_lock_common(_get_curthread(), m, NULL);
+ ret = mutex_lock_common(_get_curthread(), *m, NULL);
if (ret == 0) {
(*m)->m_refcount--;
(*m)->m_count += count;
@@ -605,10 +534,15 @@ mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
case PTHREAD_MUTEX_ERRORCHECK:
case PTHREAD_MUTEX_ADAPTIVE_NP:
if (abstime) {
- clock_gettime(CLOCK_REALTIME, &ts1);
- TIMESPEC_SUB(&ts2, abstime, &ts1);
- __sys_nanosleep(&ts2, NULL);
- ret = ETIMEDOUT;
+ if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000) {
+ ret = EINVAL;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ }
} else {
/*
* POSIX specifies that mutexes should return
@@ -625,10 +559,15 @@ mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
*/
ret = 0;
if (abstime) {
- clock_gettime(CLOCK_REALTIME, &ts1);
- TIMESPEC_SUB(&ts2, abstime, &ts1);
- __sys_nanosleep(&ts2, NULL);
- ret = ETIMEDOUT;
+ if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000) {
+ ret = EINVAL;
+ } else {
+ clock_gettime(CLOCK_REALTIME, &ts1);
+ TIMESPEC_SUB(&ts2, abstime, &ts1);
+ __sys_nanosleep(&ts2, NULL);
+ ret = ETIMEDOUT;
+ }
} else {
ts1.tv_sec = 30;
ts1.tv_nsec = 0;
@@ -726,17 +665,6 @@ _mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
return (0);
}
-void
-_mutex_unlock_private(pthread_t pthread)
-{
- struct pthread_mutex *m, *m_next;
-
- TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
- if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
- _pthread_mutex_unlock(&m);
- }
-}
-
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
int *prioceiling)
@@ -800,21 +728,6 @@ _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
}
int
-_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
-{
- struct pthread *curthread = _get_curthread();
- int ret;
-
- if (__predict_false(*mutex == NULL)) {
- ret = init_static_private(curthread, mutex);
- if (__predict_false(ret))
- return (ret);
- }
- (*mutex)->m_spinloops = count;
- return (0);
-}
-
-int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
struct pthread *curthread = _get_curthread();
@@ -839,21 +752,6 @@ _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
}
int
-_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
-{
- struct pthread *curthread = _get_curthread();
- int ret;
-
- if (__predict_false(*mutex == NULL)) {
- ret = init_static_private(curthread, mutex);
- if (__predict_false(ret))
- return (ret);
- }
- (*mutex)->m_yieldloops = count;
- return (0);
-}
-
-int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
struct pthread *curthread = _get_curthread();
diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h
index dbbdda4aee75..e84ba232058a 100644
--- a/lib/libthr/thread/thr_private.h
+++ b/lib/libthr/thread/thr_private.h
@@ -118,7 +118,6 @@ struct pthread_mutex {
struct umutex m_lock;
enum pthread_mutextype m_type;
struct pthread *m_owner;
- int m_flags;
int m_count;
int m_refcount;
int m_spinloops;
@@ -129,18 +128,10 @@ struct pthread_mutex {
TAILQ_ENTRY(pthread_mutex) m_qe;
};
-/*
- * Flags for mutexes.
- */
-#define MUTEX_FLAGS_PRIVATE 0x01
-#define MUTEX_FLAGS_INITED 0x02
-#define MUTEX_FLAGS_BUSY 0x04
-
struct pthread_mutex_attr {
enum pthread_mutextype m_type;
int m_protocol;
int m_ceiling;
- int m_flags;
};
#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
@@ -619,7 +610,6 @@ int _mutex_cv_lock(pthread_mutex_t *, int count) __hidden;
int _mutex_cv_unlock(pthread_mutex_t *, int *count) __hidden;
int _mutex_reinit(pthread_mutex_t *) __hidden;
void _mutex_fork(struct pthread *curthread) __hidden;
-void _mutex_unlock_private(struct pthread *) __hidden;
void _libpthread_init(struct pthread *) __hidden;
struct pthread *_thr_alloc(struct pthread *) __hidden;
void _thread_exit(const char *, int, const char *) __hidden __dead2;