| author | Dimitry Andric <dim@FreeBSD.org> | 2020-05-31 22:12:56 +0000 |
|---|---|---|
| committer | Dimitry Andric <dim@FreeBSD.org> | 2020-05-31 22:12:56 +0000 |
| commit | b081c245fd3c86430ef4e758625f9f11db709341 (patch) | |
| tree | fa084cbf2cd7b563eb72ffa347a07ef3bfc3260c /contrib/apr/locks/unix | |
| parent | 6fff57758067d6cb1895a21c407e96dfddf1f5e4 (diff) | |
| parent | a6c43c64d9419dfa888b1c478e658e3e20a4af11 (diff) | |
Update apr to 1.7.0. See contrib/apr/CHANGES for a summary of changes.
MFC after: 2 weeks
X-MFC-With: r361677
Notes:
svn path=/head/; revision=361678
Diffstat (limited to 'contrib/apr/locks/unix')
| -rw-r--r-- | contrib/apr/locks/unix/global_mutex.c | 59 |
| -rw-r--r-- | contrib/apr/locks/unix/proc_mutex.c | 952 |
| -rw-r--r-- | contrib/apr/locks/unix/thread_cond.c | 32 |
| -rw-r--r-- | contrib/apr/locks/unix/thread_mutex.c | 200 |
4 files changed, 1100 insertions, 143 deletions
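
Among the changes this import brings into the locks/unix code are timed acquisition entry points (apr_proc_mutex_timedlock, apr_global_mutex_timedlock, apr_thread_mutex_timedlock) and the APR_LOCK_DEFAULT_TIMED mechanism, all visible in the hunks below. The following is a minimal usage sketch, not part of the commit, assuming an APR 1.7 build with APR_HAS_TIMEDLOCKS; the lock-file path and the two-second timeout are placeholders for illustration.

```c
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_proc_mutex.h>
#include <apr_time.h>

int main(void)
{
    apr_pool_t *pool;
    apr_proc_mutex_t *mutex;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* APR_LOCK_DEFAULT_TIMED (new in 1.7.0) selects a mechanism that can
     * honor timeouts natively where the platform supports it.  The lock
     * file name here is a placeholder. */
    rv = apr_proc_mutex_create(&mutex, "/tmp/example.lock",
                               APR_LOCK_DEFAULT_TIMED, pool);
    if (rv == APR_SUCCESS) {
        /* Wait at most two seconds for the lock; APR_TIMEUP means the
         * timeout expired without acquiring it. */
        rv = apr_proc_mutex_timedlock(mutex, apr_time_from_sec(2));
        if (rv == APR_SUCCESS) {
            /* ... critical section ... */
            apr_proc_mutex_unlock(mutex);
        }
        else if (rv == APR_TIMEUP) {
            printf("lock not acquired within 2 seconds\n");
        }
        apr_proc_mutex_destroy(mutex);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}
```

On builds without timed-lock support (APR_HAS_TIMEDLOCKS unset), apr_proc_mutex_timedlock returns APR_ENOTIMPL, as the new apr_proc_mutex_timedlock wrapper in the diff shows.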
diff --git a/contrib/apr/locks/unix/global_mutex.c b/contrib/apr/locks/unix/global_mutex.c index 18de7e450d30..02173513b7f2 100644 --- a/contrib/apr/locks/unix/global_mutex.c +++ b/contrib/apr/locks/unix/global_mutex.c @@ -15,6 +15,7 @@ */ #include "apr.h" + #include "apr_strings.h" #include "apr_arch_global_mutex.h" #include "apr_proc_mutex.h" @@ -59,7 +60,7 @@ APR_DECLARE(apr_status_t) apr_global_mutex_create(apr_global_mutex_t **mutex, } #if APR_HAS_THREADS - if (m->proc_mutex->inter_meth->flags & APR_PROCESS_LOCK_MECH_IS_GLOBAL) { + if (m->proc_mutex->meth->flags & APR_PROCESS_LOCK_MECH_IS_GLOBAL) { m->thread_mutex = NULL; /* We don't need a thread lock. */ } else { @@ -141,6 +142,47 @@ APR_DECLARE(apr_status_t) apr_global_mutex_trylock(apr_global_mutex_t *mutex) return rv; } +APR_DECLARE(apr_status_t) apr_global_mutex_timedlock(apr_global_mutex_t *mutex, + apr_interval_time_t timeout) +{ +#if APR_HAS_TIMEDLOCKS + apr_status_t rv; + +#if APR_HAS_THREADS + if (mutex->thread_mutex) { + apr_time_t expiry = 0; + if (timeout > 0) { + expiry = apr_time_now() + timeout; + } + rv = apr_thread_mutex_timedlock(mutex->thread_mutex, timeout); + if (rv != APR_SUCCESS) { + return rv; + } + if (expiry) { + timeout = expiry - apr_time_now(); + if (timeout < 0) { + timeout = 0; + } + } + } +#endif /* APR_HAS_THREADS */ + + rv = apr_proc_mutex_timedlock(mutex->proc_mutex, timeout); + +#if APR_HAS_THREADS + if (rv != APR_SUCCESS) { + if (mutex->thread_mutex) { + (void)apr_thread_mutex_unlock(mutex->thread_mutex); + } + } +#endif /* APR_HAS_THREADS */ + + return rv; +#else /* APR_HAS_TIMEDLOCKS */ + return APR_ENOTIMPL; +#endif +} + APR_DECLARE(apr_status_t) apr_global_mutex_unlock(apr_global_mutex_t *mutex) { apr_status_t rv; @@ -180,9 +222,24 @@ APR_DECLARE(const char *) apr_global_mutex_lockfile(apr_global_mutex_t *mutex) return apr_proc_mutex_lockfile(mutex->proc_mutex); } +APR_DECLARE(apr_lockmech_e) apr_global_mutex_mech(apr_global_mutex_t *mutex) +{ + return apr_proc_mutex_mech(mutex->proc_mutex); +} + APR_DECLARE(const char *) apr_global_mutex_name(apr_global_mutex_t *mutex) { return apr_proc_mutex_name(mutex->proc_mutex); } +APR_PERMS_SET_IMPLEMENT(global_mutex) +{ + apr_status_t rv; + apr_global_mutex_t *mutex = (apr_global_mutex_t *)theglobal_mutex; + + rv = APR_PERMS_SET_FN(proc_mutex)(mutex->proc_mutex, perms, uid, gid); + return rv; +} + APR_POOL_IMPLEMENT_ACCESSOR(global_mutex) + diff --git a/contrib/apr/locks/unix/proc_mutex.c b/contrib/apr/locks/unix/proc_mutex.c index 32012a7613cf..8e2187ff7afe 100644 --- a/contrib/apr/locks/unix/proc_mutex.c +++ b/contrib/apr/locks/unix/proc_mutex.c @@ -19,6 +19,7 @@ #include "apr_arch_proc_mutex.h" #include "apr_arch_file_io.h" /* for apr_mkstemp() */ #include "apr_hash.h" +#include "apr_atomic.h" APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex) { @@ -26,7 +27,7 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex) } #if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || \ - APR_HAS_PROC_PTHREAD_SERIALIZE || APR_HAS_SYSVSEM_SERIALIZE + APR_HAS_SYSVSEM_SERIALIZE static apr_status_t proc_mutex_no_child_init(apr_proc_mutex_t **mutex, apr_pool_t *cont, const char *fname) @@ -35,6 +36,55 @@ static apr_status_t proc_mutex_no_child_init(apr_proc_mutex_t **mutex, } #endif +#if APR_HAS_POSIXSEM_SERIALIZE || APR_HAS_PROC_PTHREAD_SERIALIZE +static apr_status_t proc_mutex_no_perms_set(apr_proc_mutex_t *mutex, + apr_fileperms_t perms, + apr_uid_t uid, + apr_gid_t gid) +{ + return APR_ENOTIMPL; +} +#endif + +#if 
APR_HAS_FCNTL_SERIALIZE \ + || APR_HAS_FLOCK_SERIALIZE \ + || (APR_HAS_SYSVSEM_SERIALIZE \ + && !defined(HAVE_SEMTIMEDOP)) \ + || (APR_HAS_POSIXSEM_SERIALIZE \ + && !defined(HAVE_SEM_TIMEDWAIT)) \ + || (APR_HAS_PROC_PTHREAD_SERIALIZE \ + && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \ + && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED)) +static apr_status_t proc_mutex_spinsleep_timedacquire(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) +{ +#define SLEEP_TIME apr_time_from_msec(10) + apr_status_t rv; + for (;;) { + rv = apr_proc_mutex_trylock(mutex); + if (!APR_STATUS_IS_EBUSY(rv)) { + if (rv == APR_SUCCESS) { + mutex->curr_locked = 1; + } + break; + } + if (timeout <= 0) { + rv = APR_TIMEUP; + break; + } + if (timeout > SLEEP_TIME) { + apr_sleep(SLEEP_TIME); + timeout -= SLEEP_TIME; + } + else { + apr_sleep(timeout); + timeout = 0; + } + } + return rv; +} +#endif + #if APR_HAS_POSIXSEM_SERIALIZE #ifndef SEM_FAILED @@ -45,7 +95,7 @@ static apr_status_t proc_mutex_posix_cleanup(void *mutex_) { apr_proc_mutex_t *mutex = mutex_; - if (sem_close(mutex->psem_interproc) < 0) { + if (sem_close(mutex->os.psem_interproc) < 0) { return errno; } @@ -74,8 +124,6 @@ static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex, sem_t *psem; char semname[32]; - new_mutex->interproc = apr_palloc(new_mutex->pool, - sizeof(*new_mutex->interproc)); /* * This bogusness is to follow what appears to be the * lowest common denominator in Posix semaphore naming: @@ -134,7 +182,7 @@ static apr_status_t proc_mutex_posix_create(apr_proc_mutex_t *new_mutex, } /* Ahhh. The joys of Posix sems. Predelete it... */ sem_unlink(semname); - new_mutex->psem_interproc = psem; + new_mutex->os.psem_interproc = psem; new_mutex->fname = apr_pstrdup(new_mutex->pool, semname); apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex, apr_proc_mutex_cleanup, @@ -147,7 +195,7 @@ static apr_status_t proc_mutex_posix_acquire(apr_proc_mutex_t *mutex) int rc; do { - rc = sem_wait(mutex->psem_interproc); + rc = sem_wait(mutex->os.psem_interproc); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -161,7 +209,7 @@ static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex) int rc; do { - rc = sem_trywait(mutex->psem_interproc); + rc = sem_trywait(mutex->os.psem_interproc); } while (rc < 0 && errno == EINTR); if (rc < 0) { if (errno == EAGAIN) { @@ -173,10 +221,41 @@ static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex) return APR_SUCCESS; } +#if defined(HAVE_SEM_TIMEDWAIT) +static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) +{ + if (timeout <= 0) { + apr_status_t rv = proc_mutex_posix_tryacquire(mutex); + return (rv == APR_EBUSY) ? APR_TIMEUP : rv; + } + else { + int rc; + struct timespec abstime; + + timeout += apr_time_now(); + abstime.tv_sec = apr_time_sec(timeout); + abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ + + do { + rc = sem_timedwait(mutex->os.psem_interproc, &abstime); + } while (rc < 0 && errno == EINTR); + if (rc < 0) { + if (errno == ETIMEDOUT) { + return APR_TIMEUP; + } + return errno; + } + } + mutex->curr_locked = 1; + return APR_SUCCESS; +} +#endif + static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex) { mutex->curr_locked = 0; - if (sem_post(mutex->psem_interproc) < 0) { + if (sem_post(mutex->os.psem_interproc) < 0) { /* any failure is probably fatal, so no big deal to leave * ->curr_locked at 0. 
*/ return errno; @@ -194,9 +273,16 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods = proc_mutex_posix_create, proc_mutex_posix_acquire, proc_mutex_posix_tryacquire, +#if defined(HAVE_SEM_TIMEDWAIT) + proc_mutex_posix_timedacquire, +#else + proc_mutex_spinsleep_timedacquire, +#endif proc_mutex_posix_release, proc_mutex_posix_cleanup, proc_mutex_no_child_init, + proc_mutex_no_perms_set, + APR_LOCK_POSIXSEM, "posixsem" }; @@ -226,9 +312,9 @@ static apr_status_t proc_mutex_sysv_cleanup(void *mutex_) apr_proc_mutex_t *mutex=mutex_; union semun ick; - if (mutex->interproc->filedes != -1) { + if (mutex->os.crossproc != -1) { ick.val = 0; - semctl(mutex->interproc->filedes, 0, IPC_RMID, ick); + semctl(mutex->os.crossproc, 0, IPC_RMID, ick); } return APR_SUCCESS; } @@ -239,18 +325,17 @@ static apr_status_t proc_mutex_sysv_create(apr_proc_mutex_t *new_mutex, union semun ick; apr_status_t rv; - new_mutex->interproc = apr_palloc(new_mutex->pool, sizeof(*new_mutex->interproc)); - new_mutex->interproc->filedes = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600); - - if (new_mutex->interproc->filedes < 0) { + new_mutex->os.crossproc = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600); + if (new_mutex->os.crossproc == -1) { rv = errno; proc_mutex_sysv_cleanup(new_mutex); return rv; } ick.val = 1; - if (semctl(new_mutex->interproc->filedes, 0, SETVAL, ick) < 0) { + if (semctl(new_mutex->os.crossproc, 0, SETVAL, ick) < 0) { rv = errno; proc_mutex_sysv_cleanup(new_mutex); + new_mutex->os.crossproc = -1; return rv; } new_mutex->curr_locked = 0; @@ -265,7 +350,7 @@ static apr_status_t proc_mutex_sysv_acquire(apr_proc_mutex_t *mutex) int rc; do { - rc = semop(mutex->interproc->filedes, &proc_mutex_op_on, 1); + rc = semop(mutex->os.crossproc, &proc_mutex_op_on, 1); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -279,7 +364,7 @@ static apr_status_t proc_mutex_sysv_tryacquire(apr_proc_mutex_t *mutex) int rc; do { - rc = semop(mutex->interproc->filedes, &proc_mutex_op_try, 1); + rc = semop(mutex->os.crossproc, &proc_mutex_op_try, 1); } while (rc < 0 && errno == EINTR); if (rc < 0) { if (errno == EAGAIN) { @@ -291,13 +376,44 @@ static apr_status_t proc_mutex_sysv_tryacquire(apr_proc_mutex_t *mutex) return APR_SUCCESS; } +#if defined(HAVE_SEMTIMEDOP) +static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) +{ + if (timeout <= 0) { + apr_status_t rv = proc_mutex_sysv_tryacquire(mutex); + return (rv == APR_EBUSY) ? 
APR_TIMEUP : rv; + } + else { + int rc; + struct timespec reltime; + + reltime.tv_sec = apr_time_sec(timeout); + reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ + + do { + rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1, + &reltime); + } while (rc < 0 && errno == EINTR); + if (rc < 0) { + if (errno == EAGAIN) { + return APR_TIMEUP; + } + return errno; + } + } + mutex->curr_locked = 1; + return APR_SUCCESS; +} +#endif + static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex) { int rc; mutex->curr_locked = 0; do { - rc = semop(mutex->interproc->filedes, &proc_mutex_op_off, 1); + rc = semop(mutex->os.crossproc, &proc_mutex_op_off, 1); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -305,6 +421,24 @@ static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex) return APR_SUCCESS; } +static apr_status_t proc_mutex_sysv_perms_set(apr_proc_mutex_t *mutex, + apr_fileperms_t perms, + apr_uid_t uid, + apr_gid_t gid) +{ + + union semun ick; + struct semid_ds buf; + buf.sem_perm.uid = uid; + buf.sem_perm.gid = gid; + buf.sem_perm.mode = apr_unix_perms2mode(perms); + ick.buf = &buf; + if (semctl(mutex->os.crossproc, 0, IPC_SET, ick) < 0) { + return errno; + } + return APR_SUCCESS; +} + static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods = { #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(SYSVSEM_IS_GLOBAL) @@ -315,9 +449,16 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods = proc_mutex_sysv_create, proc_mutex_sysv_acquire, proc_mutex_sysv_tryacquire, +#if defined(HAVE_SEMTIMEDOP) + proc_mutex_sysv_timedacquire, +#else + proc_mutex_spinsleep_timedacquire, +#endif proc_mutex_sysv_release, proc_mutex_sysv_cleanup, proc_mutex_no_child_init, + proc_mutex_sysv_perms_set, + APR_LOCK_SYSVSEM, "sysvsem" }; @@ -325,36 +466,122 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods = #if APR_HAS_PROC_PTHREAD_SERIALIZE -static apr_status_t proc_mutex_proc_pthread_cleanup(void *mutex_) +#ifndef APR_USE_PROC_PTHREAD_MUTEX_COND +#define APR_USE_PROC_PTHREAD_MUTEX_COND \ + (defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \ + && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) +#endif + +/* The mmap()ed pthread_interproc is the native pthread_mutex_t followed + * by a refcounter to track children using it. We want to avoid calling + * pthread_mutex_destroy() on the shared mutex area while it is in use by + * another process, because this may mark the shared pthread_mutex_t as + * invalid for everyone, including forked children (unlike "sysvsem" for + * example), causing unexpected errors or deadlocks (PR 49504). So the + * last process (parent or child) referencing the mutex will effectively + * destroy it. 
+ */ +typedef struct { +#define proc_pthread_cast(m) \ + ((proc_pthread_mutex_t *)(m)->os.pthread_interproc) + pthread_mutex_t mutex; +#define proc_pthread_mutex(m) \ + (proc_pthread_cast(m)->mutex) +#if APR_USE_PROC_PTHREAD_MUTEX_COND + pthread_cond_t cond; +#define proc_pthread_mutex_cond(m) \ + (proc_pthread_cast(m)->cond) + apr_int32_t cond_locked; +#define proc_pthread_mutex_cond_locked(m) \ + (proc_pthread_cast(m)->cond_locked) + apr_uint32_t cond_num_waiters; +#define proc_pthread_mutex_cond_num_waiters(m) \ + (proc_pthread_cast(m)->cond_num_waiters) +#define proc_pthread_mutex_is_cond(m) \ + ((m)->pthread_refcounting && proc_pthread_mutex_cond_locked(m) != -1) +#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */ + apr_uint32_t refcount; +#define proc_pthread_mutex_refcount(m) \ + (proc_pthread_cast(m)->refcount) +} proc_pthread_mutex_t; + + +static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex) +{ + if (mutex->pthread_refcounting) { + apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex)); + return 1; + } + return 0; +} + +static APR_INLINE int proc_pthread_mutex_dec(apr_proc_mutex_t *mutex) +{ + if (mutex->pthread_refcounting) { + return apr_atomic_dec32(&proc_pthread_mutex_refcount(mutex)); + } + return 0; +} + +static apr_status_t proc_pthread_mutex_unref(void *mutex_) { apr_proc_mutex_t *mutex=mutex_; apr_status_t rv; +#if APR_USE_PROC_PTHREAD_MUTEX_COND + if (proc_pthread_mutex_is_cond(mutex)) { + mutex->curr_locked = 0; + } + else +#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */ if (mutex->curr_locked == 1) { - if ((rv = pthread_mutex_unlock(mutex->pthread_interproc))) { + if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif return rv; } } - /* curr_locked is set to -1 until the mutex has been created */ - if (mutex->curr_locked != -1) { - if ((rv = pthread_mutex_destroy(mutex->pthread_interproc))) { + if (!proc_pthread_mutex_dec(mutex)) { +#if APR_USE_PROC_PTHREAD_MUTEX_COND + if (proc_pthread_mutex_is_cond(mutex) && + (rv = pthread_cond_destroy(&proc_pthread_mutex_cond(mutex)))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif return rv; } +#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */ + + if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + return rv; + } + } + return APR_SUCCESS; +} + +static apr_status_t proc_mutex_pthread_cleanup(void *mutex_) +{ + apr_proc_mutex_t *mutex=mutex_; + apr_status_t rv; + + /* curr_locked is set to -1 until the mutex has been created */ + if (mutex->curr_locked != -1) { + if ((rv = proc_pthread_mutex_unref(mutex))) { + return rv; + } } - if (munmap((caddr_t)mutex->pthread_interproc, sizeof(pthread_mutex_t))) { + if (munmap(mutex->os.pthread_interproc, sizeof(proc_pthread_mutex_t))) { return errno; } return APR_SUCCESS; } -static apr_status_t proc_mutex_proc_pthread_create(apr_proc_mutex_t *new_mutex, - const char *fname) +static apr_status_t proc_mutex_pthread_create(apr_proc_mutex_t *new_mutex, + const char *fname) { apr_status_t rv; int fd; @@ -365,42 +592,50 @@ static apr_status_t proc_mutex_proc_pthread_create(apr_proc_mutex_t *new_mutex, return errno; } - new_mutex->pthread_interproc = (pthread_mutex_t *)mmap( - (caddr_t) 0, - sizeof(pthread_mutex_t), - PROT_READ | PROT_WRITE, MAP_SHARED, - fd, 0); - if (new_mutex->pthread_interproc == (pthread_mutex_t *) (caddr_t) -1) { + new_mutex->os.pthread_interproc = mmap(NULL, sizeof(proc_pthread_mutex_t), + PROT_READ | PROT_WRITE, MAP_SHARED, + fd, 0); + if 
(new_mutex->os.pthread_interproc == MAP_FAILED) { + new_mutex->os.pthread_interproc = NULL; + rv = errno; close(fd); - return errno; + return rv; } close(fd); + new_mutex->pthread_refcounting = 1; new_mutex->curr_locked = -1; /* until the mutex has been created */ +#if APR_USE_PROC_PTHREAD_MUTEX_COND + proc_pthread_mutex_cond_locked(new_mutex) = -1; +#endif if ((rv = pthread_mutexattr_init(&mattr))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); return rv; } if ((rv = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); pthread_mutexattr_destroy(&mattr); return rv; } +#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP) #ifdef HAVE_PTHREAD_MUTEX_ROBUST - if ((rv = pthread_mutexattr_setrobust_np(&mattr, - PTHREAD_MUTEX_ROBUST_NP))) { + rv = pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST); +#else + rv = pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP); +#endif + if (rv) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); pthread_mutexattr_destroy(&mattr); return rv; } @@ -408,28 +643,29 @@ static apr_status_t proc_mutex_proc_pthread_create(apr_proc_mutex_t *new_mutex, #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); pthread_mutexattr_destroy(&mattr); return rv; } -#endif /* HAVE_PTHREAD_MUTEX_ROBUST */ +#endif /* HAVE_PTHREAD_MUTEX_ROBUST[_NP] */ - if ((rv = pthread_mutex_init(new_mutex->pthread_interproc, &mattr))) { + if ((rv = pthread_mutex_init(&proc_pthread_mutex(new_mutex), &mattr))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); pthread_mutexattr_destroy(&mattr); return rv; } + proc_pthread_mutex_refcount(new_mutex) = 1; /* first/parent reference */ new_mutex->curr_locked = 0; /* mutex created now */ if ((rv = pthread_mutexattr_destroy(&mattr))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif - proc_mutex_proc_pthread_cleanup(new_mutex); + proc_mutex_pthread_cleanup(new_mutex); return rv; } @@ -440,81 +676,321 @@ static apr_status_t proc_mutex_proc_pthread_create(apr_proc_mutex_t *new_mutex, return APR_SUCCESS; } -static apr_status_t proc_mutex_proc_pthread_acquire(apr_proc_mutex_t *mutex) +static apr_status_t proc_mutex_pthread_child_init(apr_proc_mutex_t **mutex, + apr_pool_t *pool, + const char *fname) +{ + (*mutex)->curr_locked = 0; + if (proc_pthread_mutex_inc(*mutex)) { + apr_pool_cleanup_register(pool, *mutex, proc_pthread_mutex_unref, + apr_pool_cleanup_null); + } + return APR_SUCCESS; +} + +static apr_status_t proc_mutex_pthread_acquire_ex(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) { apr_status_t rv; - if ((rv = pthread_mutex_lock(mutex->pthread_interproc))) { -#ifdef HAVE_ZOS_PTHREADS - rv = errno; +#if APR_USE_PROC_PTHREAD_MUTEX_COND + if (proc_pthread_mutex_is_cond(mutex)) { + if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; #endif +#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP) + /* Okay, our owner died. Let's try to make it consistent again. 
*/ + if (rv == EOWNERDEAD) { + proc_pthread_mutex_dec(mutex); #ifdef HAVE_PTHREAD_MUTEX_ROBUST - /* Okay, our owner died. Let's try to make it consistent again. */ - if (rv == EOWNERDEAD) { - pthread_mutex_consistent_np(mutex->pthread_interproc); + pthread_mutex_consistent(&proc_pthread_mutex(mutex)); +#else + pthread_mutex_consistent_np(&proc_pthread_mutex(mutex)); +#endif + } + else +#endif + return rv; } - else + + if (!proc_pthread_mutex_cond_locked(mutex)) { + rv = APR_SUCCESS; + } + else if (!timeout) { + rv = APR_TIMEUP; + } + else { + struct timespec abstime; + + if (timeout > 0) { + timeout += apr_time_now(); + abstime.tv_sec = apr_time_sec(timeout); + abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ + } + + proc_pthread_mutex_cond_num_waiters(mutex)++; + do { + if (timeout < 0) { + rv = pthread_cond_wait(&proc_pthread_mutex_cond(mutex), + &proc_pthread_mutex(mutex)); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + break; + } + } + else { + rv = pthread_cond_timedwait(&proc_pthread_mutex_cond(mutex), + &proc_pthread_mutex(mutex), + &abstime); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + if (rv == ETIMEDOUT) { + rv = APR_TIMEUP; + } + break; + } + } + } while (proc_pthread_mutex_cond_locked(mutex)); + proc_pthread_mutex_cond_num_waiters(mutex)--; + } + if (rv != APR_SUCCESS) { + pthread_mutex_unlock(&proc_pthread_mutex(mutex)); + return rv; + } + + proc_pthread_mutex_cond_locked(mutex) = 1; + + rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif return rv; + } + } + else +#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */ + { + if (timeout < 0) { + rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + } + } + else if (!timeout) { + rv = pthread_mutex_trylock(&proc_pthread_mutex(mutex)); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + if (rv == EBUSY) { + return APR_TIMEUP; + } + } + } + else +#if defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) + { + struct timespec abstime; + + timeout += apr_time_now(); + abstime.tv_sec = apr_time_sec(timeout); + abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ + + rv = pthread_mutex_timedlock(&proc_pthread_mutex(mutex), &abstime); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + if (rv == ETIMEDOUT) { + return APR_TIMEUP; + } + } + } + if (rv) { +#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP) + /* Okay, our owner died. Let's try to make it consistent again. */ + if (rv == EOWNERDEAD) { + proc_pthread_mutex_dec(mutex); +#ifdef HAVE_PTHREAD_MUTEX_ROBUST + pthread_mutex_consistent(&proc_pthread_mutex(mutex)); #else - return rv; + pthread_mutex_consistent_np(&proc_pthread_mutex(mutex)); +#endif + } + else +#endif + return rv; + } +#else /* !HAVE_PTHREAD_MUTEX_TIMEDLOCK */ + return proc_mutex_spinsleep_timedacquire(mutex, timeout); #endif } + mutex->curr_locked = 1; return APR_SUCCESS; } -static apr_status_t proc_mutex_proc_pthread_tryacquire(apr_proc_mutex_t *mutex) +static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex) +{ + return proc_mutex_pthread_acquire_ex(mutex, -1); +} + +static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex) +{ + apr_status_t rv = proc_mutex_pthread_acquire_ex(mutex, 0); + return (rv == APR_TIMEUP) ? 
APR_EBUSY : rv; +} + +static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) +{ + return proc_mutex_pthread_acquire_ex(mutex, (timeout <= 0) ? 0 : timeout); +} + +static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex) { apr_status_t rv; - - if ((rv = pthread_mutex_trylock(mutex->pthread_interproc))) { + +#if APR_USE_PROC_PTHREAD_MUTEX_COND + if (proc_pthread_mutex_is_cond(mutex)) { + if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) { #ifdef HAVE_ZOS_PTHREADS - rv = errno; + rv = errno; #endif - if (rv == EBUSY) { - return APR_EBUSY; - } +#if defined(HAVE_PTHREAD_MUTEX_ROBUST) || defined(HAVE_PTHREAD_MUTEX_ROBUST_NP) + /* Okay, our owner died. Let's try to make it consistent again. */ + if (rv == EOWNERDEAD) { + proc_pthread_mutex_dec(mutex); #ifdef HAVE_PTHREAD_MUTEX_ROBUST - /* Okay, our owner died. Let's try to make it consistent again. */ - if (rv == EOWNERDEAD) { - pthread_mutex_consistent_np(mutex->pthread_interproc); + pthread_mutex_consistent(&proc_pthread_mutex(mutex)); +#else + pthread_mutex_consistent_np(&proc_pthread_mutex(mutex)); +#endif + } + else +#endif + return rv; + } + + if (!proc_pthread_mutex_cond_locked(mutex)) { + rv = APR_EINVAL; + } + else if (!proc_pthread_mutex_cond_num_waiters(mutex)) { rv = APR_SUCCESS; } - else + else { + rv = pthread_cond_signal(&proc_pthread_mutex_cond(mutex)); +#ifdef HAVE_ZOS_PTHREADS + if (rv) { + rv = errno; + } +#endif + } + if (rv != APR_SUCCESS) { + pthread_mutex_unlock(&proc_pthread_mutex(mutex)); return rv; -#else - return rv; + } + + proc_pthread_mutex_cond_locked(mutex) = 0; + } +#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */ + + mutex->curr_locked = 0; + if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; #endif + return rv; } - mutex->curr_locked = 1; - return rv; + + return APR_SUCCESS; } -static apr_status_t proc_mutex_proc_pthread_release(apr_proc_mutex_t *mutex) +static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods = +{ + APR_PROCESS_LOCK_MECH_IS_GLOBAL, + proc_mutex_pthread_create, + proc_mutex_pthread_acquire, + proc_mutex_pthread_tryacquire, + proc_mutex_pthread_timedacquire, + proc_mutex_pthread_release, + proc_mutex_pthread_cleanup, + proc_mutex_pthread_child_init, + proc_mutex_no_perms_set, + APR_LOCK_PROC_PTHREAD, + "pthread" +}; + +#if APR_USE_PROC_PTHREAD_MUTEX_COND +static apr_status_t proc_mutex_pthread_cond_create(apr_proc_mutex_t *new_mutex, + const char *fname) { apr_status_t rv; + pthread_condattr_t cattr; - mutex->curr_locked = 0; - if ((rv = pthread_mutex_unlock(mutex->pthread_interproc))) { + rv = proc_mutex_pthread_create(new_mutex, fname); + if (rv != APR_SUCCESS) { + return rv; + } + + if ((rv = pthread_condattr_init(&cattr))) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + apr_pool_cleanup_run(new_mutex->pool, new_mutex, + apr_proc_mutex_cleanup); + return rv; + } + if ((rv = pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED))) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + pthread_condattr_destroy(&cattr); + apr_pool_cleanup_run(new_mutex->pool, new_mutex, + apr_proc_mutex_cleanup); + return rv; + } + if ((rv = pthread_cond_init(&proc_pthread_mutex_cond(new_mutex), + &cattr))) { #ifdef HAVE_ZOS_PTHREADS rv = errno; #endif + pthread_condattr_destroy(&cattr); + apr_pool_cleanup_run(new_mutex->pool, new_mutex, + apr_proc_mutex_cleanup); return rv; } + pthread_condattr_destroy(&cattr); + + proc_pthread_mutex_cond_locked(new_mutex) = 0; + 
proc_pthread_mutex_cond_num_waiters(new_mutex) = 0; + return APR_SUCCESS; } -static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods = +static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_cond_methods = { APR_PROCESS_LOCK_MECH_IS_GLOBAL, - proc_mutex_proc_pthread_create, - proc_mutex_proc_pthread_acquire, - proc_mutex_proc_pthread_tryacquire, - proc_mutex_proc_pthread_release, - proc_mutex_proc_pthread_cleanup, - proc_mutex_no_child_init, + proc_mutex_pthread_cond_create, + proc_mutex_pthread_acquire, + proc_mutex_pthread_tryacquire, + proc_mutex_pthread_timedacquire, + proc_mutex_pthread_release, + proc_mutex_pthread_cleanup, + proc_mutex_pthread_child_init, + proc_mutex_no_perms_set, + APR_LOCK_PROC_PTHREAD, "pthread" }; +#endif #endif @@ -541,7 +1017,7 @@ static void proc_mutex_fcntl_setup(void) static apr_status_t proc_mutex_fcntl_cleanup(void *mutex_) { - apr_status_t status; + apr_status_t status = APR_SUCCESS; apr_proc_mutex_t *mutex=mutex_; if (mutex->curr_locked == 1) { @@ -550,7 +1026,16 @@ static apr_status_t proc_mutex_fcntl_cleanup(void *mutex_) return status; } - return apr_file_close(mutex->interproc); + if (mutex->interproc) { + status = apr_file_close(mutex->interproc); + } + if (!mutex->interproc_closing + && mutex->os.crossproc != -1 + && close(mutex->os.crossproc) == -1 + && status == APR_SUCCESS) { + status = errno; + } + return status; } static apr_status_t proc_mutex_fcntl_create(apr_proc_mutex_t *new_mutex, @@ -576,6 +1061,8 @@ static apr_status_t proc_mutex_fcntl_create(apr_proc_mutex_t *new_mutex, return rv; } + new_mutex->os.crossproc = new_mutex->interproc->filedes; + new_mutex->interproc_closing = 1; new_mutex->curr_locked = 0; unlink(new_mutex->fname); apr_pool_cleanup_register(new_mutex->pool, @@ -590,7 +1077,7 @@ static apr_status_t proc_mutex_fcntl_acquire(apr_proc_mutex_t *mutex) int rc; do { - rc = fcntl(mutex->interproc->filedes, F_SETLKW, &proc_mutex_lock_it); + rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_lock_it); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -604,7 +1091,7 @@ static apr_status_t proc_mutex_fcntl_tryacquire(apr_proc_mutex_t *mutex) int rc; do { - rc = fcntl(mutex->interproc->filedes, F_SETLK, &proc_mutex_lock_it); + rc = fcntl(mutex->os.crossproc, F_SETLK, &proc_mutex_lock_it); } while (rc < 0 && errno == EINTR); if (rc < 0) { #if FCNTL_TRYACQUIRE_EACCES @@ -626,7 +1113,7 @@ static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex) mutex->curr_locked=0; do { - rc = fcntl(mutex->interproc->filedes, F_SETLKW, &proc_mutex_unlock_it); + rc = fcntl(mutex->os.crossproc, F_SETLKW, &proc_mutex_unlock_it); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -634,6 +1121,22 @@ static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex) return APR_SUCCESS; } +static apr_status_t proc_mutex_fcntl_perms_set(apr_proc_mutex_t *mutex, + apr_fileperms_t perms, + apr_uid_t uid, + apr_gid_t gid) +{ + + if (mutex->fname) { + if (!(perms & APR_FPROT_GSETID)) + gid = -1; + if (fchown(mutex->os.crossproc, uid, gid) < 0) { + return errno; + } + } + return APR_SUCCESS; +} + static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods = { #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FCNTL_IS_GLOBAL) @@ -644,9 +1147,12 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods = proc_mutex_fcntl_create, proc_mutex_fcntl_acquire, proc_mutex_fcntl_tryacquire, + proc_mutex_spinsleep_timedacquire, proc_mutex_fcntl_release, 
proc_mutex_fcntl_cleanup, proc_mutex_no_child_init, + proc_mutex_fcntl_perms_set, + APR_LOCK_FCNTL, "fcntl" }; @@ -658,7 +1164,7 @@ static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *); static apr_status_t proc_mutex_flock_cleanup(void *mutex_) { - apr_status_t status; + apr_status_t status = APR_SUCCESS; apr_proc_mutex_t *mutex=mutex_; if (mutex->curr_locked == 1) { @@ -667,10 +1173,18 @@ static apr_status_t proc_mutex_flock_cleanup(void *mutex_) return status; } if (mutex->interproc) { /* if it was opened properly */ - apr_file_close(mutex->interproc); + status = apr_file_close(mutex->interproc); } - unlink(mutex->fname); - return APR_SUCCESS; + if (!mutex->interproc_closing + && mutex->os.crossproc != -1 + && close(mutex->os.crossproc) == -1 + && status == APR_SUCCESS) { + status = errno; + } + if (mutex->fname) { + unlink(mutex->fname); + } + return status; } static apr_status_t proc_mutex_flock_create(apr_proc_mutex_t *new_mutex, @@ -694,8 +1208,11 @@ static apr_status_t proc_mutex_flock_create(apr_proc_mutex_t *new_mutex, if (rv != APR_SUCCESS) { proc_mutex_flock_cleanup(new_mutex); - return errno; + return rv; } + + new_mutex->os.crossproc = new_mutex->interproc->filedes; + new_mutex->interproc_closing = 1; new_mutex->curr_locked = 0; apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex, apr_proc_mutex_cleanup, @@ -708,7 +1225,7 @@ static apr_status_t proc_mutex_flock_acquire(apr_proc_mutex_t *mutex) int rc; do { - rc = flock(mutex->interproc->filedes, LOCK_EX); + rc = flock(mutex->os.crossproc, LOCK_EX); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -722,7 +1239,7 @@ static apr_status_t proc_mutex_flock_tryacquire(apr_proc_mutex_t *mutex) int rc; do { - rc = flock(mutex->interproc->filedes, LOCK_EX | LOCK_NB); + rc = flock(mutex->os.crossproc, LOCK_EX | LOCK_NB); } while (rc < 0 && errno == EINTR); if (rc < 0) { if (errno == EWOULDBLOCK || errno == EAGAIN) { @@ -740,7 +1257,7 @@ static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex) mutex->curr_locked = 0; do { - rc = flock(mutex->interproc->filedes, LOCK_UN); + rc = flock(mutex->os.crossproc, LOCK_UN); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; @@ -755,23 +1272,45 @@ static apr_status_t proc_mutex_flock_child_init(apr_proc_mutex_t **mutex, apr_proc_mutex_t *new_mutex; int rv; - new_mutex = (apr_proc_mutex_t *)apr_palloc(pool, sizeof(apr_proc_mutex_t)); - - memcpy(new_mutex, *mutex, sizeof *new_mutex); - new_mutex->pool = pool; if (!fname) { fname = (*mutex)->fname; + if (!fname) { + return APR_SUCCESS; + } } + + new_mutex = (apr_proc_mutex_t *)apr_pmemdup(pool, *mutex, + sizeof(apr_proc_mutex_t)); + new_mutex->pool = pool; new_mutex->fname = apr_pstrdup(pool, fname); rv = apr_file_open(&new_mutex->interproc, new_mutex->fname, APR_FOPEN_WRITE, 0, new_mutex->pool); if (rv != APR_SUCCESS) { return rv; } + new_mutex->os.crossproc = new_mutex->interproc->filedes; + new_mutex->interproc_closing = 1; + *mutex = new_mutex; return APR_SUCCESS; } +static apr_status_t proc_mutex_flock_perms_set(apr_proc_mutex_t *mutex, + apr_fileperms_t perms, + apr_uid_t uid, + apr_gid_t gid) +{ + + if (mutex->fname) { + if (!(perms & APR_FPROT_GSETID)) + gid = -1; + if (fchown(mutex->os.crossproc, uid, gid) < 0) { + return errno; + } + } + return APR_SUCCESS; +} + static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods = { #if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS || defined(FLOCK_IS_GLOBAL) @@ -782,9 +1321,12 @@ static const apr_proc_mutex_unix_lock_methods_t 
mutex_flock_methods = proc_mutex_flock_create, proc_mutex_flock_acquire, proc_mutex_flock_tryacquire, + proc_mutex_spinsleep_timedacquire, proc_mutex_flock_release, proc_mutex_flock_cleanup, proc_mutex_flock_child_init, + proc_mutex_flock_perms_set, + APR_LOCK_FLOCK, "flock" }; @@ -801,55 +1343,169 @@ void apr_proc_mutex_unix_setup_lock(void) #endif } -static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex, apr_lockmech_e mech) +static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex, + apr_lockmech_e mech, + apr_os_proc_mutex_t *ospmutex) { +#if APR_HAS_PROC_PTHREAD_SERIALIZE + new_mutex->os.pthread_interproc = NULL; +#endif +#if APR_HAS_POSIXSEM_SERIALIZE + new_mutex->os.psem_interproc = NULL; +#endif +#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE + new_mutex->os.crossproc = -1; + +#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE + new_mutex->interproc = NULL; + new_mutex->interproc_closing = 0; +#endif +#endif + switch (mech) { case APR_LOCK_FCNTL: #if APR_HAS_FCNTL_SERIALIZE - new_mutex->inter_meth = &mutex_fcntl_methods; + new_mutex->meth = &mutex_fcntl_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #else return APR_ENOTIMPL; #endif break; case APR_LOCK_FLOCK: #if APR_HAS_FLOCK_SERIALIZE - new_mutex->inter_meth = &mutex_flock_methods; + new_mutex->meth = &mutex_flock_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #else return APR_ENOTIMPL; #endif break; case APR_LOCK_SYSVSEM: #if APR_HAS_SYSVSEM_SERIALIZE - new_mutex->inter_meth = &mutex_sysv_methods; + new_mutex->meth = &mutex_sysv_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #else return APR_ENOTIMPL; #endif break; case APR_LOCK_POSIXSEM: #if APR_HAS_POSIXSEM_SERIALIZE - new_mutex->inter_meth = &mutex_posixsem_methods; + new_mutex->meth = &mutex_posixsem_methods; + if (ospmutex) { + if (ospmutex->psem_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.psem_interproc = ospmutex->psem_interproc; + } #else return APR_ENOTIMPL; #endif break; case APR_LOCK_PROC_PTHREAD: #if APR_HAS_PROC_PTHREAD_SERIALIZE - new_mutex->inter_meth = &mutex_proc_pthread_methods; + new_mutex->meth = &mutex_proc_pthread_methods; + if (ospmutex) { + if (ospmutex->pthread_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.pthread_interproc = ospmutex->pthread_interproc; + } #else return APR_ENOTIMPL; #endif break; + case APR_LOCK_DEFAULT_TIMED: +#if APR_HAS_PROC_PTHREAD_SERIALIZE \ + && (APR_USE_PROC_PTHREAD_MUTEX_COND \ + || defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \ + && defined(HAVE_PTHREAD_MUTEX_ROBUST) +#if APR_USE_PROC_PTHREAD_MUTEX_COND + new_mutex->meth = &mutex_proc_pthread_cond_methods; +#else + new_mutex->meth = &mutex_proc_pthread_methods; +#endif + if (ospmutex) { + if (ospmutex->pthread_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.pthread_interproc = ospmutex->pthread_interproc; + } + break; +#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP) + new_mutex->meth = &mutex_sysv_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } + break; +#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT) + new_mutex->meth = &mutex_posixsem_methods; + if (ospmutex) { + if 
(ospmutex->psem_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.psem_interproc = ospmutex->psem_interproc; + } + break; +#endif + /* fall trough */ case APR_LOCK_DEFAULT: #if APR_USE_FLOCK_SERIALIZE - new_mutex->inter_meth = &mutex_flock_methods; + new_mutex->meth = &mutex_flock_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #elif APR_USE_SYSVSEM_SERIALIZE - new_mutex->inter_meth = &mutex_sysv_methods; + new_mutex->meth = &mutex_sysv_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #elif APR_USE_FCNTL_SERIALIZE - new_mutex->inter_meth = &mutex_fcntl_methods; + new_mutex->meth = &mutex_fcntl_methods; + if (ospmutex) { + if (ospmutex->crossproc == -1) { + return APR_EINVAL; + } + new_mutex->os.crossproc = ospmutex->crossproc; + } #elif APR_USE_PROC_PTHREAD_SERIALIZE - new_mutex->inter_meth = &mutex_proc_pthread_methods; + new_mutex->meth = &mutex_proc_pthread_methods; + if (ospmutex) { + if (ospmutex->pthread_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.pthread_interproc = ospmutex->pthread_interproc; + } #elif APR_USE_POSIXSEM_SERIALIZE - new_mutex->inter_meth = &mutex_posixsem_methods; + new_mutex->meth = &mutex_posixsem_methods; + if (ospmutex) { + if (ospmutex->psem_interproc == NULL) { + return APR_EINVAL; + } + new_mutex->os.psem_interproc = ospmutex->psem_interproc; + } #else return APR_ENOTIMPL; #endif @@ -865,10 +1521,10 @@ APR_DECLARE(const char *) apr_proc_mutex_defname(void) apr_status_t rv; apr_proc_mutex_t mutex; - if ((rv = proc_mutex_choose_method(&mutex, APR_LOCK_DEFAULT)) != APR_SUCCESS) { + if ((rv = proc_mutex_choose_method(&mutex, APR_LOCK_DEFAULT, + NULL)) != APR_SUCCESS) { return "unknown"; } - mutex.meth = mutex.inter_meth; return apr_proc_mutex_name(&mutex); } @@ -877,12 +1533,11 @@ static apr_status_t proc_mutex_create(apr_proc_mutex_t *new_mutex, apr_lockmech_ { apr_status_t rv; - if ((rv = proc_mutex_choose_method(new_mutex, mech)) != APR_SUCCESS) { + if ((rv = proc_mutex_choose_method(new_mutex, mech, + NULL)) != APR_SUCCESS) { return rv; } - new_mutex->meth = new_mutex->inter_meth; - if ((rv = new_mutex->meth->create(new_mutex, fname)) != APR_SUCCESS) { return rv; } @@ -925,6 +1580,16 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex) return mutex->meth->tryacquire(mutex); } +APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex, + apr_interval_time_t timeout) +{ +#if APR_HAS_TIMEDLOCKS + return mutex->meth->timedacquire(mutex, timeout); +#else + return APR_ENOTIMPL; +#endif +} + APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex) { return mutex->meth->release(mutex); @@ -935,6 +1600,11 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_cleanup(void *mutex) return ((apr_proc_mutex_t *)mutex)->meth->cleanup(mutex); } +APR_DECLARE(apr_lockmech_e) apr_proc_mutex_mech(apr_proc_mutex_t *mutex) +{ + return mutex->meth->mech; +} + APR_DECLARE(const char *) apr_proc_mutex_name(apr_proc_mutex_t *mutex) { return mutex->meth->name; @@ -957,45 +1627,69 @@ APR_DECLARE(const char *) apr_proc_mutex_lockfile(apr_proc_mutex_t *mutex) return NULL; } +APR_PERMS_SET_IMPLEMENT(proc_mutex) +{ + apr_proc_mutex_t *mutex = (apr_proc_mutex_t *)theproc_mutex; + return mutex->meth->perms_set(mutex, perms, uid, gid); +} + APR_POOL_IMPLEMENT_ACCESSOR(proc_mutex) /* Implement OS-specific accessors defined in 
apr_portable.h */ -APR_DECLARE(apr_status_t) apr_os_proc_mutex_get(apr_os_proc_mutex_t *ospmutex, - apr_proc_mutex_t *pmutex) +APR_DECLARE(apr_status_t) apr_os_proc_mutex_get_ex(apr_os_proc_mutex_t *ospmutex, + apr_proc_mutex_t *pmutex, + apr_lockmech_e *mech) { -#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE || APR_HAS_POSIXSEM_SERIALIZE - if (pmutex->interproc) { - ospmutex->crossproc = pmutex->interproc->filedes; - } - else { - ospmutex->crossproc = -1; + *ospmutex = pmutex->os; + if (mech) { + *mech = pmutex->meth->mech; } -#endif -#if APR_HAS_PROC_PTHREAD_SERIALIZE - ospmutex->pthread_interproc = pmutex->pthread_interproc; -#endif return APR_SUCCESS; } -APR_DECLARE(apr_status_t) apr_os_proc_mutex_put(apr_proc_mutex_t **pmutex, +APR_DECLARE(apr_status_t) apr_os_proc_mutex_get(apr_os_proc_mutex_t *ospmutex, + apr_proc_mutex_t *pmutex) +{ + return apr_os_proc_mutex_get_ex(ospmutex, pmutex, NULL); +} + +APR_DECLARE(apr_status_t) apr_os_proc_mutex_put_ex(apr_proc_mutex_t **pmutex, apr_os_proc_mutex_t *ospmutex, + apr_lockmech_e mech, + int register_cleanup, apr_pool_t *pool) { + apr_status_t rv; if (pool == NULL) { return APR_ENOPOOL; } + if ((*pmutex) == NULL) { (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool, sizeof(apr_proc_mutex_t)); (*pmutex)->pool = pool; } -#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE || APR_HAS_POSIXSEM_SERIALIZE - apr_os_file_put(&(*pmutex)->interproc, &ospmutex->crossproc, 0, pool); -#endif -#if APR_HAS_PROC_PTHREAD_SERIALIZE - (*pmutex)->pthread_interproc = ospmutex->pthread_interproc; + rv = proc_mutex_choose_method(*pmutex, mech, ospmutex); +#if APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE + if (rv == APR_SUCCESS) { + rv = apr_os_file_put(&(*pmutex)->interproc, &(*pmutex)->os.crossproc, + 0, pool); + } #endif - return APR_SUCCESS; + + if (rv == APR_SUCCESS && register_cleanup) { + apr_pool_cleanup_register(pool, *pmutex, apr_proc_mutex_cleanup, + apr_pool_cleanup_null); + } + return rv; +} + +APR_DECLARE(apr_status_t) apr_os_proc_mutex_put(apr_proc_mutex_t **pmutex, + apr_os_proc_mutex_t *ospmutex, + apr_pool_t *pool) +{ + return apr_os_proc_mutex_put_ex(pmutex, ospmutex, APR_LOCK_DEFAULT, + 0, pool); } diff --git a/contrib/apr/locks/unix/thread_cond.c b/contrib/apr/locks/unix/thread_cond.c index db7dd4f0d973..3c8e3170a0dc 100644 --- a/contrib/apr/locks/unix/thread_cond.c +++ b/contrib/apr/locks/unix/thread_cond.c @@ -79,21 +79,31 @@ APR_DECLARE(apr_status_t) apr_thread_cond_timedwait(apr_thread_cond_t *cond, apr_interval_time_t timeout) { apr_status_t rv; - apr_time_t then; - struct timespec abstime; + if (timeout < 0) { + rv = pthread_cond_wait(&cond->cond, &mutex->mutex); +#ifdef HAVE_ZOS_PTHREADS + if (rv) { + rv = errno; + } +#endif + } + else { + apr_time_t then; + struct timespec abstime; - then = apr_time_now() + timeout; - abstime.tv_sec = apr_time_sec(then); - abstime.tv_nsec = apr_time_usec(then) * 1000; /* nanoseconds */ + then = apr_time_now() + timeout; + abstime.tv_sec = apr_time_sec(then); + abstime.tv_nsec = apr_time_usec(then) * 1000; /* nanoseconds */ - rv = pthread_cond_timedwait(&cond->cond, &mutex->mutex, &abstime); + rv = pthread_cond_timedwait(&cond->cond, &mutex->mutex, &abstime); #ifdef HAVE_ZOS_PTHREADS - if (rv) { - rv = errno; - } + if (rv) { + rv = errno; + } #endif - if (ETIMEDOUT == rv) { - return APR_TIMEUP; + if (ETIMEDOUT == rv) { + return APR_TIMEUP; + } } return rv; } diff --git a/contrib/apr/locks/unix/thread_mutex.c 
b/contrib/apr/locks/unix/thread_mutex.c index 73fd1e146210..f7cae5145781 100644 --- a/contrib/apr/locks/unix/thread_mutex.c +++ b/contrib/apr/locks/unix/thread_mutex.c @@ -77,6 +77,19 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex, return rv; } +#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK + if (flags & APR_THREAD_MUTEX_TIMED) { + rv = apr_thread_cond_create(&new_mutex->cond, pool); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + pthread_mutex_destroy(&new_mutex->mutex); + return rv; + } + } +#endif + apr_pool_cleanup_register(new_mutex->pool, new_mutex, thread_mutex_cleanup, apr_pool_cleanup_null); @@ -89,13 +102,45 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex) { apr_status_t rv; + if (mutex->cond) { + apr_status_t rv2; + + rv = pthread_mutex_lock(&mutex->mutex); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + return rv; + } + + if (mutex->locked) { + mutex->num_waiters++; + rv = apr_thread_cond_wait(mutex->cond, mutex); + mutex->num_waiters--; + } + else { + mutex->locked = 1; + } + + rv2 = pthread_mutex_unlock(&mutex->mutex); + if (rv2 && !rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#else + rv = rv2; +#endif + } + + return rv; + } + rv = pthread_mutex_lock(&mutex->mutex); #ifdef HAVE_ZOS_PTHREADS if (rv) { rv = errno; } #endif - + return rv; } @@ -103,6 +148,36 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex) { apr_status_t rv; + if (mutex->cond) { + apr_status_t rv2; + + rv = pthread_mutex_lock(&mutex->mutex); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + return rv; + } + + if (mutex->locked) { + rv = APR_EBUSY; + } + else { + mutex->locked = 1; + } + + rv2 = pthread_mutex_unlock(&mutex->mutex); + if (rv2) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#else + rv = rv2; +#endif + } + + return rv; + } + rv = pthread_mutex_trylock(&mutex->mutex); if (rv) { #ifdef HAVE_ZOS_PTHREADS @@ -114,10 +189,121 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex) return APR_SUCCESS; } +APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex, + apr_interval_time_t timeout) +{ + apr_status_t rv = APR_ENOTIMPL; +#if APR_HAS_TIMEDLOCKS + +#ifdef HAVE_PTHREAD_MUTEX_TIMEDLOCK + if (timeout <= 0) { + rv = pthread_mutex_trylock(&mutex->mutex); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + if (rv == EBUSY) { + rv = APR_TIMEUP; + } + } + } + else { + struct timespec abstime; + + timeout += apr_time_now(); + abstime.tv_sec = apr_time_sec(timeout); + abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */ + + rv = pthread_mutex_timedlock(&mutex->mutex, &abstime); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + if (rv == ETIMEDOUT) { + rv = APR_TIMEUP; + } + } + } + +#else /* HAVE_PTHREAD_MUTEX_TIMEDLOCK */ + + if (mutex->cond) { + rv = pthread_mutex_lock(&mutex->mutex); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + return rv; + } + + if (mutex->locked) { + if (timeout <= 0) { + rv = APR_TIMEUP; + } + else { + mutex->num_waiters++; + do { + rv = apr_thread_cond_timedwait(mutex->cond, mutex, + timeout); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + break; + } + } while (mutex->locked); + mutex->num_waiters--; + } + if (rv) { + pthread_mutex_unlock(&mutex->mutex); + return rv; + } + } + + mutex->locked = 1; + + rv = pthread_mutex_unlock(&mutex->mutex); + if (rv) { +#ifdef HAVE_ZOS_PTHREADS + rv = errno; +#endif + return rv; + } + } + +#endif /* 
HAVE_PTHREAD_MUTEX_TIMEDLOCK */ + +#endif /* APR_HAS_TIMEDLOCKS */ + return rv; +} + APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex) { apr_status_t status; + if (mutex->cond) { + status = pthread_mutex_lock(&mutex->mutex); + if (status) { +#ifdef HAVE_ZOS_PTHREADS + status = errno; +#endif + return status; + } + + if (!mutex->locked) { + status = APR_EINVAL; + } + else if (mutex->num_waiters) { + status = apr_thread_cond_signal(mutex->cond); + } + if (status) { + pthread_mutex_unlock(&mutex->mutex); + return status; + } + + mutex->locked = 0; + } + status = pthread_mutex_unlock(&mutex->mutex); #ifdef HAVE_ZOS_PTHREADS if (status) { @@ -130,7 +316,17 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex) APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex) { - return apr_pool_cleanup_run(mutex->pool, mutex, thread_mutex_cleanup); + apr_status_t rv, rv2 = APR_SUCCESS; + + if (mutex->cond) { + rv2 = apr_thread_cond_destroy(mutex->cond); + } + rv = apr_pool_cleanup_run(mutex->pool, mutex, thread_mutex_cleanup); + if (rv == APR_SUCCESS) { + rv = rv2; + } + + return rv; } APR_POOL_IMPLEMENT_ACCESSOR(thread_mutex) |