about summary refs log tree commit diff
path: root/sys/sys/proc.h
diff options
context:
space:
mode:
author: Jeff Roberson <jeff@FreeBSD.org> 2019-12-15 21:11:15 +0000
committer: Jeff Roberson <jeff@FreeBSD.org> 2019-12-15 21:11:15 +0000
commit: 61a74c5ccd65d1a00a96779f16eda8c41ff3a426 (patch)
tree: 0325e01f4affe5d9ef25e68fae1a7cbd5d2ebde9 /sys/sys/proc.h
parent: 054802650063bea1cb817ef22a887c3116813ba9 (diff)
downloadsrc-61a74c5ccd65d1a00a96779f16eda8c41ff3a426.tar.gz
src-61a74c5ccd65d1a00a96779f16eda8c41ff3a426.zip
schedlock 1/4
Eliminate recursion from most thread_lock consumers. Return from sched_add() without the thread_lock held. This eliminates unnecessary atomics and lock word loads as well as reducing the hold time for scheduler locks. This will eventually allow for lockless remote adds.

Discussed with: kib
Reviewed by: jhb
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D22626
Notes
Notes: svn path=/head/; revision=355779
Diffstat (limited to 'sys/sys/proc.h')
-rw-r--r--  sys/sys/proc.h  25
1 files changed, 21 insertions, 4 deletions
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 7be1941416be..cd856657f341 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -376,9 +376,13 @@ struct thread0_storage {
};
struct mtx *thread_lock_block(struct thread *);
-void thread_lock_unblock(struct thread *, struct mtx *);
+void thread_lock_block_wait(struct thread *);
void thread_lock_set(struct thread *, struct mtx *);
+void thread_lock_unblock(struct thread *, struct mtx *);
#define THREAD_LOCK_ASSERT(td, type) \
+ mtx_assert((td)->td_lock, (type))
+
+#define THREAD_LOCK_BLOCKED_ASSERT(td, type) \
do { \
struct mtx *__m = (td)->td_lock; \
if (__m != &blocked_lock) \
@@ -388,8 +392,17 @@ do { \
#ifdef INVARIANTS
#define THREAD_LOCKPTR_ASSERT(td, lock) \
do { \
- struct mtx *__m = (td)->td_lock; \
- KASSERT((__m == &blocked_lock || __m == (lock)), \
+ struct mtx *__m; \
+ __m = (td)->td_lock; \
+ KASSERT(__m == (lock), \
+ ("Thread %p lock %p does not match %p", td, __m, (lock))); \
+} while (0)
+
+#define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock) \
+do { \
+ struct mtx *__m; \
+ __m = (td)->td_lock; \
+ KASSERT(__m == (lock) || __m == &blocked_lock, \
("Thread %p lock %p does not match %p", td, __m, (lock))); \
} while (0)
@@ -401,6 +414,7 @@ do { \
} while (0)
#else
#define THREAD_LOCKPTR_ASSERT(td, lock)
+#define THREAD_LOCKPTR_BLOCKED_ASSERT(td, lock)
#define TD_LOCKS_INC(td)
#define TD_LOCKS_DEC(td)
@@ -519,6 +533,9 @@ do { \
#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
+#define TD_CAN_ABORT(td) (TD_ON_SLEEPQ((td)) && \
+ ((td)->td_flags & TDF_SINTR) != 0)
+
#define KTDSTATE(td) \
(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
@@ -1089,7 +1106,7 @@ int securelevel_ge(struct ucred *cr, int level);
int securelevel_gt(struct ucred *cr, int level);
void sess_hold(struct session *);
void sess_release(struct session *);
-int setrunnable(struct thread *);
+int setrunnable(struct thread *, int);
void setsugid(struct proc *p);
int should_yield(void);
int sigonstack(size_t sp);