aboutsummaryrefslogtreecommitdiff
path: root/sys/compat/linuxkpi/common
diff options
context:
space:
mode:
authorVladimir Kondratyev <wulf@FreeBSD.org>2022-01-26 22:27:01 +0000
committerVladimir Kondratyev <wulf@FreeBSD.org>2022-01-26 22:27:01 +0000
commit11ef1d975f61f728a25727fcb89b340b5433713c (patch)
tree59f2b2018c09336e843524167b276fc4c180da08 /sys/compat/linuxkpi/common
parent5a1a07f2a0e10179d38d8112fb5568e2b510b28c (diff)
downloadsrc-11ef1d975f61f728a25727fcb89b340b5433713c.tar.gz
src-11ef1d975f61f728a25727fcb89b340b5433713c.zip
Revert "LinuxKPI: Allow spin_lock_irqsave to be called within a critical section"
This change results in deadlocks on UP systems. This reverts commit 7dea0c9e6eba4dc127cd67667c81fa2c250f1024. Requested by: kib, hselasky
Diffstat (limited to 'sys/compat/linuxkpi/common')
-rw-r--r--sys/compat/linuxkpi/common/include/linux/spinlock.h27
1 file changed, 4 insertions, 23 deletions
diff --git a/sys/compat/linuxkpi/common/include/linux/spinlock.h b/sys/compat/linuxkpi/common/include/linux/spinlock.h
index 31d47fa73986..a87cb7180b28 100644
--- a/sys/compat/linuxkpi/common/include/linux/spinlock.h
+++ b/sys/compat/linuxkpi/common/include/linux/spinlock.h
@@ -37,7 +37,6 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kdb.h>
-#include <sys/proc.h>
#include <linux/compiler.h>
#include <linux/rwlock.h>
@@ -118,32 +117,14 @@ typedef struct {
local_bh_disable(); \
} while (0)
-#define __spin_trylock_nested(_l, _n) ({ \
- int __ret; \
- if (SPIN_SKIP()) { \
- __ret = 1; \
- } else { \
- __ret = mtx_trylock_flags(&(_l)->m, MTX_DUPOK); \
- if (likely(__ret != 0)) \
- local_bh_disable(); \
- } \
- __ret; \
-})
-
-#define spin_lock_irqsave(_l, flags) do { \
- (flags) = 0; \
- if (unlikely(curthread->td_critnest != 0)) \
- while (!spin_trylock(_l)) {} \
- else \
- spin_lock(_l); \
+#define spin_lock_irqsave(_l, flags) do { \
+ (flags) = 0; \
+ spin_lock(_l); \
} while (0)
#define spin_lock_irqsave_nested(_l, flags, _n) do { \
(flags) = 0; \
- if (unlikely(curthread->td_critnest != 0)) \
- while (!__spin_trylock_nested(_l, _n)) {} \
- else \
- spin_lock_nested(_l, _n); \
+ spin_lock_nested(_l, _n); \
} while (0)
#define spin_unlock_irqrestore(_l, flags) do { \