aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavide Italiano <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
committerDavide Italiano <davide@FreeBSD.org>2013-09-20 23:06:21 +0000
commit7faf4d90e884a567477da91241f1c2b76f148272 (patch)
tree0f8acc1728a85e0911253a1fa7cfe9c10d2c05b4
parent566a5f502062abb65b03ae1410a792d69b23dd46 (diff)
downloadsrc-7faf4d90e884a567477da91241f1c2b76f148272.tar.gz
src-7faf4d90e884a567477da91241f1c2b76f148272.zip
Fix lc_lock/lc_unlock() support for rmlocks held in shared mode. With
current lock classes KPI it was really difficult because there was no way to pass an rmtracker object to the lock/unlock routines. In order to accomplish the task, modify the aforementioned functions so that they can return (or pass as argument) a uintptr_t, which in the rm case is used to hold a pointer to struct rm_priotracker for the current thread. As an added bonus, this fixes rm_sleep() in the rm shared case, which right now can communicate the priotracker structure between lc_unlock()/lc_lock(). Suggested by: jhb Reviewed by: jhb Approved by: re (delphij)
Notes
Notes: svn path=/head/; revision=255745
-rw-r--r--sys/dev/hwpmc/hwpmc_mod.c2
-rw-r--r--sys/kern/kern_condvar.c5
-rw-r--r--sys/kern/kern_lock.c8
-rw-r--r--sys/kern/kern_mutex.c16
-rw-r--r--sys/kern/kern_rmlock.c61
-rw-r--r--sys/kern/kern_rwlock.c8
-rw-r--r--sys/kern/kern_sx.c8
-rw-r--r--sys/kern/kern_synch.c3
-rw-r--r--sys/sys/lock.h15
9 files changed, 79 insertions, 47 deletions
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 8e5eac879d24..2794f3874a10 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -2027,6 +2027,7 @@ pmc_allocate_owner_descriptor(struct proc *p)
/* allocate space for N pointers and one descriptor struct */
po = malloc(sizeof(struct pmc_owner), M_PMC, M_WAITOK|M_ZERO);
po->po_owner = p;
+ LIST_INIT(&po->po_pmcs);
LIST_INSERT_HEAD(poh, po, po_next); /* insert into hash table */
TAILQ_INIT(&po->po_logbuffers);
@@ -2152,6 +2153,7 @@ pmc_allocate_pmc_descriptor(void)
struct pmc *pmc;
pmc = malloc(sizeof(struct pmc), M_PMC, M_WAITOK|M_ZERO);
+ LIST_INIT(&pmc->pm_targets);
PMCDBG(PMC,ALL,1, "allocate-pmc -> pmc=%p", pmc);
diff --git a/sys/kern/kern_condvar.c b/sys/kern/kern_condvar.c
index 483ea2eae37d..2700a25d477c 100644
--- a/sys/kern/kern_condvar.c
+++ b/sys/kern/kern_condvar.c
@@ -97,7 +97,7 @@ _cv_wait(struct cv *cvp, struct lock_object *lock)
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
- int lock_state;
+ uintptr_t lock_state;
td = curthread;
lock_state = 0;
@@ -214,7 +214,8 @@ _cv_wait_sig(struct cv *cvp, struct lock_object *lock)
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
- int lock_state, rval;
+ uintptr_t lock_state;
+ int rval;
td = curthread;
lock_state = 0;
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 87dca63e0bd0..74a5b19dc559 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -142,12 +142,12 @@ static void assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void db_show_lockmgr(const struct lock_object *lock);
#endif
-static void lock_lockmgr(struct lock_object *lock, int how);
+static void lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_lockmgr(const struct lock_object *lock,
struct thread **owner);
#endif
-static int unlock_lockmgr(struct lock_object *lock);
+static uintptr_t unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
.lc_name = "lockmgr",
@@ -350,13 +350,13 @@ assert_lockmgr(const struct lock_object *lock, int what)
}
static void
-lock_lockmgr(struct lock_object *lock, int how)
+lock_lockmgr(struct lock_object *lock, uintptr_t how)
{
panic("lockmgr locks do not support sleep interlocking");
}
-static int
+static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index cd1ed7dbf4ea..e61a187319f9 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -101,14 +101,14 @@ static void assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_mtx(const struct lock_object *lock);
#endif
-static void lock_mtx(struct lock_object *lock, int how);
-static void lock_spin(struct lock_object *lock, int how);
+static void lock_mtx(struct lock_object *lock, uintptr_t how);
+static void lock_spin(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_mtx(const struct lock_object *lock,
struct thread **owner);
#endif
-static int unlock_mtx(struct lock_object *lock);
-static int unlock_spin(struct lock_object *lock);
+static uintptr_t unlock_mtx(struct lock_object *lock);
+static uintptr_t unlock_spin(struct lock_object *lock);
/*
* Lock classes for sleep and spin mutexes.
@@ -154,20 +154,20 @@ assert_mtx(const struct lock_object *lock, int what)
}
void
-lock_mtx(struct lock_object *lock, int how)
+lock_mtx(struct lock_object *lock, uintptr_t how)
{
mtx_lock((struct mtx *)lock);
}
void
-lock_spin(struct lock_object *lock, int how)
+lock_spin(struct lock_object *lock, uintptr_t how)
{
panic("spin locks can only use msleep_spin");
}
-int
+uintptr_t
unlock_mtx(struct lock_object *lock)
{
struct mtx *m;
@@ -178,7 +178,7 @@ unlock_mtx(struct lock_object *lock)
return (0);
}
-int
+uintptr_t
unlock_spin(struct lock_object *lock)
{
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index ff397eb136af..ec0e7fa5fd8c 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -77,11 +77,11 @@ static void assert_rm(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_rm(const struct lock_object *lock);
#endif
-static void lock_rm(struct lock_object *lock, int how);
+static void lock_rm(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_rm(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_rm(struct lock_object *lock);
+static uintptr_t unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
.lc_name = "rm",
@@ -118,34 +118,61 @@ assert_rm(const struct lock_object *lock, int what)
rm_assert((const struct rmlock *)lock, what);
}
-/*
- * These do not support read locks because it would be hard to make
- * the tracker work correctly with the current lock_class API as you
- * would need to have the tracker pointer available when calling
- * rm_rlock() in lock_rm().
- */
static void
-lock_rm(struct lock_object *lock, int how)
+lock_rm(struct lock_object *lock, uintptr_t how)
{
struct rmlock *rm;
+ struct rm_priotracker *tracker;
rm = (struct rmlock *)lock;
- if (how)
+ if (how == 0)
rm_wlock(rm);
-#ifdef INVARIANTS
- else
- panic("lock_rm called in read mode");
-#endif
+ else {
+ tracker = (struct rm_priotracker *)how;
+ rm_rlock(rm, tracker);
+ }
}
-static int
+static uintptr_t
unlock_rm(struct lock_object *lock)
{
+ struct thread *td;
+ struct pcpu *pc;
struct rmlock *rm;
+ struct rm_queue *queue;
+ struct rm_priotracker *tracker;
+ uintptr_t how;
rm = (struct rmlock *)lock;
- rm_wunlock(rm);
- return (1);
+ tracker = NULL;
+ how = 0;
+ rm_assert(rm, RA_LOCKED | RA_NOTRECURSED);
+ if (rm_wowned(rm))
+ rm_wunlock(rm);
+ else {
+ /*
+ * Find the right rm_priotracker structure for curthread.
+ * The guarantee about its uniqueness is given by the fact
+ * we already asserted the lock wasn't recursively acquired.
+ */
+ critical_enter();
+ td = curthread;
+ pc = pcpu_find(curcpu);
+ for (queue = pc->pc_rm_queue.rmq_next;
+ queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
+ tracker = (struct rm_priotracker *)queue;
+ if ((tracker->rmp_rmlock == rm) &&
+ (tracker->rmp_thread == td)) {
+ how = (uintptr_t)tracker;
+ break;
+ }
+ }
+ KASSERT(tracker != NULL,
+ ("rm_priotracker is non-NULL when lock held in read mode"));
+ critical_exit();
+ rm_runlock(rm, tracker);
+ }
+ return (how);
}
#ifdef KDTRACE_HOOKS
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index bd4070484a15..45993f2cf8b1 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -83,11 +83,11 @@ SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
static void db_show_rwlock(const struct lock_object *lock);
#endif
static void assert_rw(const struct lock_object *lock, int what);
-static void lock_rw(struct lock_object *lock, int how);
+static void lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_rw(struct lock_object *lock);
+static uintptr_t unlock_rw(struct lock_object *lock);
struct lock_class lock_class_rw = {
.lc_name = "rw",
@@ -141,7 +141,7 @@ assert_rw(const struct lock_object *lock, int what)
}
void
-lock_rw(struct lock_object *lock, int how)
+lock_rw(struct lock_object *lock, uintptr_t how)
{
struct rwlock *rw;
@@ -152,7 +152,7 @@ lock_rw(struct lock_object *lock, int how)
rw_rlock(rw);
}
-int
+uintptr_t
unlock_rw(struct lock_object *lock)
{
struct rwlock *rw;
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index ff5d95dcc350..fd9a51fbfbfd 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -116,11 +116,11 @@ static void assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_sx(const struct lock_object *lock);
#endif
-static void lock_sx(struct lock_object *lock, int how);
+static void lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
-static int unlock_sx(struct lock_object *lock);
+static uintptr_t unlock_sx(struct lock_object *lock);
struct lock_class lock_class_sx = {
.lc_name = "sx",
@@ -156,7 +156,7 @@ assert_sx(const struct lock_object *lock, int what)
}
void
-lock_sx(struct lock_object *lock, int how)
+lock_sx(struct lock_object *lock, uintptr_t how)
{
struct sx *sx;
@@ -167,7 +167,7 @@ lock_sx(struct lock_object *lock, int how)
sx_slock(sx);
}
-int
+uintptr_t
unlock_sx(struct lock_object *lock)
{
struct sx *sx;
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index b0e19082c914..0a400e9fe4e2 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -157,7 +157,8 @@ _sleep(void *ident, struct lock_object *lock, int priority,
struct thread *td;
struct proc *p;
struct lock_class *class;
- int catch, lock_state, pri, rval, sleepq_flags;
+ uintptr_t lock_state;
+ int catch, pri, rval, sleepq_flags;
WITNESS_SAVE_DECL(lock_witness);
td = curthread;
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 40dc16bc5cbb..58343b594837 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -56,13 +56,14 @@ struct thread;
*/
struct lock_class {
- const char *lc_name;
- u_int lc_flags;
- void (*lc_assert)(const struct lock_object *lock, int what);
- void (*lc_ddb_show)(const struct lock_object *lock);
- void (*lc_lock)(struct lock_object *lock, int how);
- int (*lc_owner)(const struct lock_object *lock, struct thread **owner);
- int (*lc_unlock)(struct lock_object *lock);
+ const char *lc_name;
+ u_int lc_flags;
+ void (*lc_assert)(const struct lock_object *lock, int what);
+ void (*lc_ddb_show)(const struct lock_object *lock);
+ void (*lc_lock)(struct lock_object *lock, uintptr_t how);
+ int (*lc_owner)(const struct lock_object *lock,
+ struct thread **owner);
+ uintptr_t (*lc_unlock)(struct lock_object *lock);
};
#define LC_SLEEPLOCK 0x00000001 /* Sleep lock. */